comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
There are no scenarios where we would expect a null response unless there was an error on the service, which in theory should be very rare. If we don't get any response I don't know what message could be helpful. | public Response<CommunicationRelayConfiguration> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
Response<CommunicationRelayConfiguration> response =
client.issueRelayConfigurationWithResponseAsync(body, context).block();
if (response == null || response.getValue() == null) {
throw logger.logExceptionAsError(new IllegalStateException("Service failed to return a response or expected value."));
}
return new SimpleResponse<CommunicationRelayConfiguration>(
response,
response.getValue());
} | if (response == null || response.getValue() == null) { | public Response<CommunicationRelayConfiguration> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
Response<CommunicationRelayConfiguration> response =
client.getRelayConfigurationWithResponse(communicationUser, context).block();
return response;
} | class CommunicationRelayClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayClient.class);
CommunicationRelayClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CommunicationRelayConfiguration getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfiguration(body);
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The obtained Communication Relay Configuration
*/
} | class CommunicationRelayClient {
private final CommunicationRelayAsyncClient client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayClient.class);
CommunicationRelayClient(CommunicationRelayAsyncClient communicationNetworkingClient) {
client = communicationNetworkingClient;
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CommunicationRelayConfiguration getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return client.getRelayConfiguration(communicationUser).block();
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
} |
You should be calling `issueRelayConfigurationAsync`. `issueRelayConfiguration` is blocking and that is the opposite of what we want to do in an async client. | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return Mono.just(client.issueRelayConfiguration(body));
} | return Mono.just(client.issueRelayConfiguration(body)); | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body)
.onErrorMap(CommunicationErrorResponseException.class, e -> translateException(e))
.flatMap(
(Response<CommunicationRelayConfiguration> response) -> {
return Mono.just(
new SimpleResponse<CommunicationRelayConfiguration>(
response,
response.getValue()));
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
private CommunicationErrorResponseException translateException(CommunicationErrorResponseException exception) {
CommunicationErrorResponse error = null;
if (exception.getValue() != null) {
error = exception.getValue();
}
return new CommunicationErrorResponseException(exception.getMessage(), exception.getResponse(), error);
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} |
I think you should look into why the compiler is complaining because you should not need this block of code. Identity uses a flatmap because it needs to manipulate the response type, that is not the case here | public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body)
.onErrorMap(CommunicationErrorResponseException.class, e -> translateException(e))
.flatMap(
(Response<CommunicationRelayConfiguration> response) -> {
return Mono.just(
new SimpleResponse<CommunicationRelayConfiguration>(
response,
response.getValue()));
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
} | response.getValue())); | new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body =
private CommunicationErrorResponseException translateException(CommunicationErrorResponseException exception) {
CommunicationErrorResponse error = null;
if (exception.getValue() != null) {
error = exception.getValue();
}
return new CommunicationErrorResponseException(exception.getMessage(), exception.getResponse(), error);
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body =
}
} |
We usually don't print things out in tests. Any reason why we need this here? | public void createRelayClientUsingConnectionString(HttpClient httpClient) {
CommunicationRelayClientBuilder builder = createClientBuilderUsingConnectionString(httpClient);
asyncClient = setupAsyncClient(builder, "createIdentityClientUsingConnectionStringSync");
assertNotNull(asyncClient);
String connectionString = System.getenv("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING");
CommunicationIdentityAsyncClient communicationIdentityClient = new CommunicationIdentityClientBuilder()
.connectionString(connectionString)
.buildAsyncClient();
Mono<CommunicationUserIdentifier> response = communicationIdentityClient.createUser();
CommunicationUserIdentifier user = response.block();
StepVerifier.create(response)
.assertNext(item -> {
assertNotNull(item.getId());
}).verifyComplete();
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
assertNotNull(iceS.getUrls());
System.out.println("Urls:" + iceS.getUrls());
assertNotNull(iceS.getUsername());
System.out.println("Username: " + iceS.getUsername());
assertNotNull(iceS.getCredential());
System.out.println("Credential: " + iceS.getCredential());
}
}).verifyComplete();
}
} | System.out.println("Urls:" + iceS.getUrls()); | public void createRelayClientUsingConnectionString(HttpClient httpClient) {
setupTest(httpClient);
CommunicationRelayClientBuilder builder = createClientBuilderUsingConnectionString(httpClient);
asyncClient = setupAsyncClient(builder, "createIdentityClientUsingConnectionStringSync");
assertNotNull(asyncClient);
assertNotNull(user.getId());
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
assertNotNull(iceS.getUrls());
assertNotNull(iceS.getUsername());
assertNotNull(iceS.getCredential());
}
}).verifyComplete();
}
} | class CommunicationRelayAsyncTests extends CommunicationRelayClientTestBase {
private CommunicationRelayAsyncClient asyncClient;
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void createRelayClientUsingManagedIdentity(HttpClient httpClient) {
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
CommunicationIdentityClientBuilder identityBuilder = createIdentityClientBuilder(httpClient);
CommunicationIdentityAsyncClient communicationIdentityClient = setupIdentityAsyncClient(identityBuilder, "createRelayClientUsingManagedIdentity");
Mono<CommunicationUserIdentifier> response = communicationIdentityClient.createUser();
CommunicationUserIdentifier user = response.block();
StepVerifier.create(response)
.assertNext(item -> {
assertNotNull(item.getId());
}).verifyComplete();
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
System.out.println("Urls:" + iceS.getUrls());
assertNotNull(iceS.getUsername());
System.out.println("Username: " + iceS.getUsername());
assertNotNull(iceS.getCredential());
System.out.println("Credential: " + iceS.getCredential());
}
}).verifyComplete();
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void getRelayConfigWithResponse(HttpClient httpClient) {
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
CommunicationIdentityClientBuilder identityBuilder = createIdentityClientBuilder(httpClient);
CommunicationIdentityAsyncClient communicationIdentityClient = setupIdentityAsyncClient(identityBuilder, "createRelayClientUsingManagedIdentity");
Mono<CommunicationUserIdentifier> responseUser = communicationIdentityClient.createUser();
CommunicationUserIdentifier user = responseUser.block();
StepVerifier.create(responseUser)
.assertNext(item -> {
assertNotNull(item.getId());
}).verifyComplete();
if (user != null) {
Mono<Response<CommunicationRelayConfiguration>> relayConfig = asyncClient.getRelayConfigurationWithResponse(user);
StepVerifier.create(relayConfig)
.assertNext(response -> {
assertEquals(200, response.getStatusCode(), "Expect status code to be 200");
assertNotNull(response.getValue().getIceServers());
for (CommunicationIceServer iceS : response.getValue().getIceServers()) {
assertNotNull(iceS.getUrls());
System.out.println("Urls: " + iceS.getUrls());
assertNotNull(iceS.getUsername());
System.out.println("Username: " + iceS.getUsername());
assertNotNull(iceS.getCredential());
System.out.println("Credential: " + iceS.getCredential());
}
}).verifyComplete();
}
}
private CommunicationRelayAsyncClient setupAsyncClient(CommunicationRelayClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
private CommunicationIdentityAsyncClient setupIdentityAsyncClient(CommunicationIdentityClientBuilder builder, String testName) {
return addLoggingPolicyIdentity(builder, testName).buildAsyncClient();
}
} | class CommunicationRelayAsyncTests extends CommunicationRelayClientTestBase {
private CommunicationRelayAsyncClient asyncClient;
private CommunicationUserIdentifier user;
private void setupTest(HttpClient httpClient) {
CommunicationIdentityClient communicationIdentityClient = createIdentityClientBuilder(httpClient).buildClient();
user = communicationIdentityClient.createUser();
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void createRelayClientUsingManagedIdentity(HttpClient httpClient) {
setupTest(httpClient);
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
assertNotNull(user.getId());
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
assertNotNull(iceS.getUsername());
assertNotNull(iceS.getCredential());
}
}).verifyComplete();
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void getRelayConfigWithResponse(HttpClient httpClient) {
setupTest(httpClient);
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
assertNotNull(user.getId());
if (user != null) {
Mono<Response<CommunicationRelayConfiguration>> relayConfig = asyncClient.getRelayConfigurationWithResponse(user);
StepVerifier.create(relayConfig)
.assertNext(response -> {
assertEquals(200, response.getStatusCode(), "Expect status code to be 200");
assertNotNull(response.getValue().getIceServers());
for (CommunicationIceServer iceS : response.getValue().getIceServers()) {
assertNotNull(iceS.getUrls());
assertNotNull(iceS.getUsername());
assertNotNull(iceS.getCredential());
}
}).verifyComplete();
}
}
private CommunicationRelayAsyncClient setupAsyncClient(CommunicationRelayClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
} |
Same here | public void getRelayConfigWithResponse(HttpClient httpClient) {
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
CommunicationIdentityClientBuilder identityBuilder = createIdentityClientBuilder(httpClient);
CommunicationIdentityAsyncClient communicationIdentityClient = setupIdentityAsyncClient(identityBuilder, "createRelayClientUsingManagedIdentity");
Mono<CommunicationUserIdentifier> responseUser = communicationIdentityClient.createUser();
CommunicationUserIdentifier user = responseUser.block();
StepVerifier.create(responseUser)
.assertNext(item -> {
assertNotNull(item.getId());
}).verifyComplete();
if (user != null) {
Mono<Response<CommunicationRelayConfiguration>> relayConfig = asyncClient.getRelayConfigurationWithResponse(user);
StepVerifier.create(relayConfig)
.assertNext(response -> {
assertEquals(200, response.getStatusCode(), "Expect status code to be 200");
assertNotNull(response.getValue().getIceServers());
for (CommunicationIceServer iceS : response.getValue().getIceServers()) {
assertNotNull(iceS.getUrls());
System.out.println("Urls: " + iceS.getUrls());
assertNotNull(iceS.getUsername());
System.out.println("Username: " + iceS.getUsername());
assertNotNull(iceS.getCredential());
System.out.println("Credential: " + iceS.getCredential());
}
}).verifyComplete();
}
} | System.out.println("Urls: " + iceS.getUrls()); | public void getRelayConfigWithResponse(HttpClient httpClient) {
setupTest(httpClient);
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
assertNotNull(user.getId());
if (user != null) {
Mono<Response<CommunicationRelayConfiguration>> relayConfig = asyncClient.getRelayConfigurationWithResponse(user);
StepVerifier.create(relayConfig)
.assertNext(response -> {
assertEquals(200, response.getStatusCode(), "Expect status code to be 200");
assertNotNull(response.getValue().getIceServers());
for (CommunicationIceServer iceS : response.getValue().getIceServers()) {
assertNotNull(iceS.getUrls());
assertNotNull(iceS.getUsername());
assertNotNull(iceS.getCredential());
}
}).verifyComplete();
}
} | class CommunicationRelayAsyncTests extends CommunicationRelayClientTestBase {
private CommunicationRelayAsyncClient asyncClient;
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void createRelayClientUsingManagedIdentity(HttpClient httpClient) {
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
CommunicationIdentityClientBuilder identityBuilder = createIdentityClientBuilder(httpClient);
CommunicationIdentityAsyncClient communicationIdentityClient = setupIdentityAsyncClient(identityBuilder, "createRelayClientUsingManagedIdentity");
Mono<CommunicationUserIdentifier> response = communicationIdentityClient.createUser();
CommunicationUserIdentifier user = response.block();
StepVerifier.create(response)
.assertNext(item -> {
assertNotNull(item.getId());
}).verifyComplete();
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
System.out.println("Urls:" + iceS.getUrls());
assertNotNull(iceS.getUsername());
System.out.println("Username: " + iceS.getUsername());
assertNotNull(iceS.getCredential());
System.out.println("Credential: " + iceS.getCredential());
}
}).verifyComplete();
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void createRelayClientUsingConnectionString(HttpClient httpClient) {
CommunicationRelayClientBuilder builder = createClientBuilderUsingConnectionString(httpClient);
asyncClient = setupAsyncClient(builder, "createIdentityClientUsingConnectionStringSync");
assertNotNull(asyncClient);
String connectionString = System.getenv("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING");
CommunicationIdentityAsyncClient communicationIdentityClient = new CommunicationIdentityClientBuilder()
.connectionString(connectionString)
.buildAsyncClient();
Mono<CommunicationUserIdentifier> response = communicationIdentityClient.createUser();
CommunicationUserIdentifier user = response.block();
StepVerifier.create(response)
.assertNext(item -> {
assertNotNull(item.getId());
}).verifyComplete();
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
assertNotNull(iceS.getUrls());
System.out.println("Urls:" + iceS.getUrls());
assertNotNull(iceS.getUsername());
System.out.println("Username: " + iceS.getUsername());
assertNotNull(iceS.getCredential());
System.out.println("Credential: " + iceS.getCredential());
}
}).verifyComplete();
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
private CommunicationRelayAsyncClient setupAsyncClient(CommunicationRelayClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
private CommunicationIdentityAsyncClient setupIdentityAsyncClient(CommunicationIdentityClientBuilder builder, String testName) {
return addLoggingPolicyIdentity(builder, testName).buildAsyncClient();
}
} | class CommunicationRelayAsyncTests extends CommunicationRelayClientTestBase {
private CommunicationRelayAsyncClient asyncClient;
private CommunicationUserIdentifier user;
private void setupTest(HttpClient httpClient) {
CommunicationIdentityClient communicationIdentityClient = createIdentityClientBuilder(httpClient).buildClient();
user = communicationIdentityClient.createUser();
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void createRelayClientUsingManagedIdentity(HttpClient httpClient) {
setupTest(httpClient);
CommunicationRelayClientBuilder builder = createClientBuilderUsingManagedIdentity(httpClient);
asyncClient = setupAsyncClient(builder, "createRelayClientUsingManagedIdentitySync");
assertNotNull(asyncClient);
assertNotNull(user.getId());
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
assertNotNull(iceS.getUsername());
assertNotNull(iceS.getCredential());
}
}).verifyComplete();
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void createRelayClientUsingConnectionString(HttpClient httpClient) {
setupTest(httpClient);
CommunicationRelayClientBuilder builder = createClientBuilderUsingConnectionString(httpClient);
asyncClient = setupAsyncClient(builder, "createIdentityClientUsingConnectionStringSync");
assertNotNull(asyncClient);
assertNotNull(user.getId());
if (user != null) {
Mono<CommunicationRelayConfiguration> relayResponse = asyncClient.getRelayConfiguration(user);
StepVerifier.create(relayResponse)
.assertNext(relayConfig -> {
assertNotNull(relayConfig.getIceServers());
for (CommunicationIceServer iceS : relayConfig.getIceServers()) {
assertNotNull(iceS.getUrls());
assertNotNull(iceS.getUsername());
assertNotNull(iceS.getCredential());
}
}).verifyComplete();
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
private CommunicationRelayAsyncClient setupAsyncClient(CommunicationRelayClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
} |
Good call. | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return Mono.just(client.issueRelayConfiguration(body));
} | return Mono.just(client.issueRelayConfiguration(body)); | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body)
.onErrorMap(CommunicationErrorResponseException.class, e -> translateException(e))
.flatMap(
(Response<CommunicationRelayConfiguration> response) -> {
return Mono.just(
new SimpleResponse<CommunicationRelayConfiguration>(
response,
response.getValue()));
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
private CommunicationErrorResponseException translateException(CommunicationErrorResponseException exception) {
CommunicationErrorResponse error = null;
if (exception.getValue() != null) {
error = exception.getValue();
}
return new CommunicationErrorResponseException(exception.getMessage(), exception.getResponse(), error);
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} |
Call the `getRelayConfigurationWithResponse` method instead of duplicating the code in both methods. | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationAsync(body);
} | return client.issueRelayConfigurationAsync(body); | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body)
.onErrorMap(CommunicationErrorResponseException.class, e -> translateException(e));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
private CommunicationErrorResponseException translateException(CommunicationErrorResponseException exception) {
CommunicationErrorResponse error = null;
if (exception.getValue() != null) {
error = exception.getValue();
}
return new CommunicationErrorResponseException(exception.getMessage(), exception.getResponse(), error);
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} |
Probably you were looking at an old version of the file but getRelayConfiguration is calling client.issueRelayConfigurationAsync(body); and getRelayConfigurationWithResponse is calling issueRelayConfigurationWithResponseAsync so there's no duplicate code. | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationAsync(body);
} | return client.issueRelayConfigurationAsync(body); | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body)
.onErrorMap(CommunicationErrorResponseException.class, e -> translateException(e));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
private CommunicationErrorResponseException translateException(CommunicationErrorResponseException exception) {
CommunicationErrorResponse error = null;
if (exception.getValue() != null) {
error = exception.getValue();
}
return new CommunicationErrorResponseException(exception.getMessage(), exception.getResponse(), error);
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} |
This should be using `withContext(context -> { })` pattern to pass the context to the pipeline. | public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body)
.onErrorMap(CommunicationErrorResponseException.class, e -> translateException(e));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
} | return client.issueRelayConfigurationWithResponseAsync(body) | new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body =
private CommunicationErrorResponseException translateException(CommunicationErrorResponseException exception) {
CommunicationErrorResponse error = null;
if (exception.getValue() != null) {
error = exception.getValue();
}
return new CommunicationErrorResponseException(exception.getMessage(), exception.getResponse(), error);
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body =
}
} |
We should not be handling null here if it isn't an expected response. If a null is returned that's an invalid service response and the pipeline upstream should handle it. | public Response<CommunicationRelayConfiguration> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
Response<CommunicationRelayConfiguration> response =
client.issueRelayConfigurationWithResponseAsync(body, context).block();
if (response == null || response.getValue() == null) {
throw logger.logExceptionAsError(new IllegalStateException("Service failed to return a response or expected value."));
}
return new SimpleResponse<CommunicationRelayConfiguration>(
response,
response.getValue());
} | if (response == null || response.getValue() == null) { | public Response<CommunicationRelayConfiguration> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
Response<CommunicationRelayConfiguration> response =
client.getRelayConfigurationWithResponse(communicationUser, context).block();
return response;
} | class CommunicationRelayClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayClient.class);
CommunicationRelayClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CommunicationRelayConfiguration getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfiguration(body);
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The obtained Communication Relay Configuration
*/
} | class CommunicationRelayClient {
private final CommunicationRelayAsyncClient client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayClient.class);
CommunicationRelayClient(CommunicationRelayAsyncClient communicationNetworkingClient) {
client = communicationNetworkingClient;
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CommunicationRelayConfiguration getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return client.getRelayConfiguration(communicationUser).block();
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
} |
This check shouldn't happen here either. All these checks should be done in the build method. | public CommunicationRelayClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(createServiceImpl());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
Objects.requireNonNull(endpoint, "'ednpoint' cannot be null.");
Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null.");
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
// NOTE(review): 'version' is intentionally unused — the builder always targets the
// latest known service version. Revisit if additional service versions are introduced.
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
// Endpoint is validated eagerly; credential validation happens later in
// createHttpPipelineAuthPolicy() when no explicit pipeline was supplied.
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
// The synchronous client wraps the async client built above, so both share
// the same validated configuration and pipeline.
return new CommunicationRelayClient(buildAsyncClient());
}
/**
 * Builds the generated networking client implementation backing the public clients.
 * A caller-supplied pipeline wins; otherwise one is assembled from the builder state.
 */
private CommunicationNetworkingClientImpl createServiceImpl() {
    HttpPipeline effectivePipeline = (this.pipeline != null)
        ? this.pipeline
        : createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies);
    CommunicationNetworkingClientImplBuilder implBuilder = new CommunicationNetworkingClientImplBuilder();
    implBuilder
        .endpoint(endpoint)
        .pipeline(effectivePipeline);
    return implBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
/**
 * Assembles the HTTP pipeline: required policies first, then any user-supplied
 * custom policies, finally bound to the configured HttpClient and ClientOptions.
 */
private HttpPipeline createHttpPipeline(HttpClient httpClient,
                                        HttpPipelinePolicy authorizationPolicy,
                                        List<HttpPipelinePolicy> customPolicies) {
    List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
    applyRequiredPolicies(pipelinePolicies, authorizationPolicy);
    // Custom policies run after the required set.
    if (customPolicies != null && customPolicies.size() > 0) {
        pipelinePolicies.addAll(customPolicies);
    }
    HttpPipelineBuilder pipelineBuilder = new HttpPipelineBuilder()
        .policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .clientOptions(clientOptions);
    return pipelineBuilder.build();
}
/**
 * Adds the mandatory policy set (user agent, request id, retry, cookie, auth, logging)
 * to {@code policies} in pipeline order.
 *
 * @param policies mutable list the required policies are appended to.
 * @param authorizationPolicy the credential-derived auth policy to insert.
 */
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
    String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Null-guarded views of the user-configurable options.
    ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    // ClientOptions.applicationId takes precedence over HttpLogOptions.applicationId.
    String applicationId = null;
    if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
        applicationId = buildClientOptions.getApplicationId();
    } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
        applicationId = buildLogOptions.getApplicationId();
    }
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
    policies.add(new RequestIdPolicy());
    policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
    policies.add(new CookiePolicy());
    policies.add(authorizationPolicy);
    // Fix: use the null-guarded buildLogOptions (the original passed the raw field,
    // which would NPE-or-misbehave if httpLogOptions(null) had been called).
    policies.add(new HttpLoggingPolicy(buildLogOptions));
}
} |
We should add javadoc for all samples as it would help the user understand what scenaio this sample is demonstrating. | public static void main(String[] args) {
String connectionString = System.getenv("COMMUNICATION_SAMPLES_CONNECTION_STRING");
CommunicationIdentityClient communicationIdentityClient = new CommunicationIdentityClientBuilder()
.connectionString(connectionString)
.buildClient();
CommunicationRelayClient communicationRelayClient = new CommunicationRelayClientBuilder()
.connectionString(connectionString)
.buildClient();
CommunicationUserIdentifier user = communicationIdentityClient.createUser();
System.out.println("User id: " + user.getId());
CommunicationRelayConfiguration config = communicationRelayClient.getRelayConfiguration(user);
System.out.println("Expires on:" + config.getExpiresOn());
List<CommunicationIceServer> iceServers = config.getIceServers();
for (CommunicationIceServer iceS : iceServers) {
System.out.println("URLS: " + iceS.getUrls());
System.out.println("Username: " + iceS.getUsername());
System.out.println("credential: " + iceS.getCredential());
}
} | public static void main(String[] args) {
String connectionString = System.getenv("COMMUNICATION_SAMPLES_CONNECTION_STRING");
CommunicationIdentityClient communicationIdentityClient = new CommunicationIdentityClientBuilder()
.connectionString(connectionString)
.buildClient();
CommunicationRelayClient communicationRelayClient = new CommunicationRelayClientBuilder()
.connectionString(connectionString)
.buildClient();
CommunicationUserIdentifier user = communicationIdentityClient.createUser();
System.out.println("User id: " + user.getId());
CommunicationRelayConfiguration config = communicationRelayClient.getRelayConfiguration(user);
System.out.println("Expires on:" + config.getExpiresOn());
List<CommunicationIceServer> iceServers = config.getIceServers();
for (CommunicationIceServer iceS : iceServers) {
System.out.println("URLS: " + iceS.getUrls());
System.out.println("Username: " + iceS.getUsername());
System.out.println("credential: " + iceS.getCredential());
}
} | class CreateAndIssueRelayCredentialsExample {
} | class CreateAndIssueRelayCredentialsExample {
} | |
I think this can be an exception since "connectionString" is not global, we can only check it here. | public CommunicationRelayClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initializing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(createServiceImpl());
}
/**
 * Builds the generated networking client implementation, validating required state first.
 *
 * @throws NullPointerException if endpoint, httpClient, or customPolicies is null.
 */
private CommunicationNetworkingClientImpl createServiceImpl() {
    // Fix: error message previously said 'ednpoint'.
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
    Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null.");
    HttpPipeline builderPipeline = this.pipeline;
    if (this.pipeline == null) {
        // No explicit pipeline supplied — assemble one from the builder state.
        builderPipeline = createHttpPipeline(httpClient,
            createHttpPipelineAuthPolicy(),
            customPolicies);
    }
    CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
    clientBuilder.endpoint(endpoint)
        .pipeline(builderPipeline);
    return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initializing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
```suggestion return this.getRelayConfigurationWithResponse(communicationUser).map(FluxUtil::toMono); ``` | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return withContext(context -> client.issueRelayConfigurationAsync(body, context));
} | return withContext(context -> client.issueRelayConfigurationAsync(body, context)); | public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
return this.getRelayConfigurationWithResponse(communicationUser).flatMap(FluxUtil::toMono);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
    try {
        // Build the relay-configuration request for this user.
        CommunicationRelayConfigurationRequest request = new CommunicationRelayConfigurationRequest();
        request.setId(communicationUser.getId());
        // Issue the call inside the subscriber context; service errors are surfaced as-is.
        return withContext(requestContext ->
            client.issueRelayConfigurationWithResponseAsync(request, requestContext)
                .onErrorMap(CommunicationErrorResponseException.class, serviceError -> serviceError));
    } catch (RuntimeException unexpected) {
        return monoError(logger, unexpected);
    }
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
    try {
        // Build the relay-configuration request for this user.
        CommunicationRelayConfigurationRequest request = new CommunicationRelayConfigurationRequest();
        request.setId(communicationUser.getId());
        // Forward the caller-provided context; service errors are surfaced as-is.
        return client.issueRelayConfigurationWithResponseAsync(request, context)
            .onErrorMap(CommunicationErrorResponseException.class, serviceError -> serviceError);
    } catch (RuntimeException unexpected) {
        return monoError(logger, unexpected);
    }
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} |
```suggestion return withContext(context -> getRelayConfigurationWithResponse(communicationUser, context)); ``` | public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return withContext(context -> client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
} | } | new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
CommunicationRelayConfigurationRequest body =
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
try {
CommunicationRelayConfigurationRequest body = new CommunicationRelayConfigurationRequest();
body.setId(communicationUser.getId());
return client.issueRelayConfigurationWithResponseAsync(body, context)
.onErrorMap(CommunicationErrorResponseException.class, e -> e);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} | class CommunicationRelayAsyncClient {
private final CommunicationNetworkTraversalsImpl client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayAsyncClient.class);
CommunicationRelayAsyncClient(CommunicationNetworkingClientImpl communicationNetworkingClient) {
client = communicationNetworkingClient.getCommunicationNetworkTraversals();
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CommunicationRelayConfiguration> getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
    // Delegate to the Response-returning overload and unwrap just the payload.
    Mono<Response<CommunicationRelayConfiguration>> responseMono =
        this.getRelayConfigurationWithResponse(communicationUser);
    return responseMono.flatMap(FluxUtil::toMono);
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The created Communication Relay Configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser) {
    // Capture the caller's reactor context and hand it to the package-private overload.
    return withContext(requestContext ->
        this.getRelayConfigurationWithResponse(communicationUser, requestContext));
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The created Communication Relay Configuration.
*/
Mono<Response<CommunicationRelayConfiguration>> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
try {
CommunicationRelayConfigurationRequest body =
}
} |
This check should be moved to the package-private method in async client. | public Response<CommunicationRelayConfiguration> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
context = context == null ? Context.NONE : context;
Response<CommunicationRelayConfiguration> response =
client.getRelayConfigurationWithResponse(communicationUser, context).block();
return response;
} | context = context == null ? Context.NONE : context; | public Response<CommunicationRelayConfiguration> getRelayConfigurationWithResponse(CommunicationUserIdentifier communicationUser, Context context) {
Response<CommunicationRelayConfiguration> response =
client.getRelayConfigurationWithResponse(communicationUser, context).block();
return response;
} | class CommunicationRelayClient {
private final CommunicationRelayAsyncClient client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayClient.class);
CommunicationRelayClient(CommunicationRelayAsyncClient communicationNetworkingClient) {
client = communicationNetworkingClient;
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CommunicationRelayConfiguration getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
    // Synchronous facade: delegate to the async client and block for the result.
    CommunicationRelayConfiguration configuration =
        this.client.getRelayConfiguration(communicationUser).block();
    return configuration;
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
} | class CommunicationRelayClient {
private final CommunicationRelayAsyncClient client;
private final ClientLogger logger = new ClientLogger(CommunicationRelayClient.class);
CommunicationRelayClient(CommunicationRelayAsyncClient communicationNetworkingClient) {
client = communicationNetworkingClient;
}
/**
* Creates a new CommunicationRelayConfiguration.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CommunicationRelayConfiguration getRelayConfiguration(CommunicationUserIdentifier communicationUser) {
    // Blockingly wait on the async client to provide the synchronous result.
    return this.client
        .getRelayConfiguration(communicationUser)
        .block();
}
/**
* Creates a new CommunicationRelayConfiguration with response.
*
* @param communicationUser The CommunicationUserIdentifier for whom to issue a token
* @param context A {@link Context} representing the request context.
* @return The obtained Communication Relay Configuration
*/
@ServiceMethod(returns = ReturnType.SINGLE)
} |
We can have another field on the builder called `connectionString`. | public CommunicationRelayClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting the endpoint and initializing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(createServiceImpl());
}
/**
 * Builds the generated service implementation from the configured builder state.
 * Validates required state up front so a missing setting fails fast with an accurate message.
 */
private CommunicationNetworkingClientImpl createServiceImpl() {
    // Error message fixed: previously read "'ednpoint'".
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
    Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null.");
    // An explicitly supplied pipeline wins; otherwise assemble one from the builder state.
    HttpPipeline builderPipeline = this.pipeline;
    if (this.pipeline == null) {
        builderPipeline = createHttpPipeline(httpClient,
            createHttpPipelineAuthPolicy(),
            customPolicies);
    }
    CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
    clientBuilder.endpoint(endpoint)
        .pipeline(builderPipeline);
    return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
/**
 * Assembles the HTTP pipeline: required policies first, then any caller-supplied policies.
 *
 * @param httpClient the transport used by the pipeline
 * @param authorizationPolicy the credential policy (HMAC or bearer token)
 * @param customPolicies additional policies appended after the required ones; may be null or empty
 */
private HttpPipeline createHttpPipeline(HttpClient httpClient,
                                        HttpPipelinePolicy authorizationPolicy,
                                        List<HttpPipelinePolicy> customPolicies) {
    // Diamond operator and isEmpty() for readability; behavior is unchanged.
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    applyRequiredPolicies(policies, authorizationPolicy);
    if (customPolicies != null && !customPolicies.isEmpty()) {
        policies.addAll(customPolicies);
    }
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .clientOptions(clientOptions)
        .build();
}
// Appends the mandatory policy chain in order: user agent, request id, retry, cookies,
// authorization, then HTTP logging. The order matters: logging is added last so it
// observes the request after all earlier policies have run.
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
// SDK name/version come from the packaged properties file; fall back to placeholders.
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
// ClientOptions.applicationId takes precedence over HttpLogOptions.applicationId.
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
// Caller-supplied retry policy wins over the default RetryPolicy.
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
// NOTE(review): uses the raw httpLogOptions field instead of buildLogOptions; the field
// is initialized non-null so behavior matches, but the locals look inconsistent — confirm.
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
// Validate required builder state before constructing the client.
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
// Reuse buildAsyncClient() so its argument validation also covers the sync client.
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
This check is not required because it is already performed in `buildAsyncClient()`, which this method calls. | public CommunicationRelayClient buildClient() {
Objects.requireNonNull(endpoint, "'ednpoint' cannot be null.");
Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null.");
return new CommunicationRelayClient(buildAsyncClient());
} | Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null."); | public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting the endpoint and initializing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    // Parse the connection string, then apply its endpoint and access key to this builder.
    CommunicationConnectionString parsed = new CommunicationConnectionString(connectionString);
    this.endpoint(parsed.getEndpoint())
        .credential(new AzureKeyCredential(parsed.getAccessKey()));
    return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
    // Validate required builder state before constructing the client.
    // Error message fixed: previously read "'ednpoint'".
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null.");
    return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
// Builds the generated service implementation from the configured builder state.
// Precondition checks (endpoint, etc.) are performed by buildAsyncClient() before this runs.
private CommunicationNetworkingClientImpl createServiceImpl() {
// An explicitly supplied pipeline wins; otherwise assemble one from the builder state.
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
/**
 * Appends the mandatory pipeline policies in order: user agent, request id,
 * retry, cookie, auth, then HTTP logging. Order matters — logging must wrap
 * the authenticated request last.
 *
 * @param policies the list to append to (mutated)
 * @param authorizationPolicy the auth policy produced by createHttpPipelineAuthPolicy()
 */
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
    String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Fall back to fresh defaults so a null value set via the builder cannot NPE below.
    ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    String applicationId = null;
    if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
        applicationId = buildClientOptions.getApplicationId();
    } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
        applicationId = buildLogOptions.getApplicationId();
    }
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
    policies.add(new RequestIdPolicy());
    policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
    policies.add(new CookiePolicy());
    policies.add(authorizationPolicy);
    // Fix: use the null-safe buildLogOptions computed above; the original passed the
    // raw httpLogOptions field, defeating the null fallback it had just computed.
    policies.add(new HttpLoggingPolicy(buildLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder connectionString(String connectionString) {
    // NOTE(review): no explicit null check here — assumes CommunicationConnectionString
    // validates null/malformed input in its constructor; confirm against that class.
    CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
    String endpoint = connectionStringObject.getEndpoint();
    String accessKey = connectionStringObject.getAccessKey();
    // Route through the dedicated setters so endpoint/credential state stays consistent.
    this
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(accessKey));
    return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
    // 'version' is intentionally unused: only one service version is currently
    // supported; the parameter exists for API consistency / forward compatibility.
    return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
Removed the null check as suggested. An explicit check is unnecessary here: connectionString() sets both the endpoint and the credential, which are already validated elsewhere, and the connection string may legitimately be unset when the endpoint and credential are configured directly. | public CommunicationRelayClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(createServiceImpl());
}
/**
 * Builds the underlying CommunicationNetworkingClientImpl.
 * Uses the caller-supplied pipeline when present; otherwise assembles one
 * from httpClient, the auth policy, and any custom policies.
 *
 * @return CommunicationNetworkingClientImpl configured with this builder's endpoint and pipeline
 * @throws NullPointerException if endpoint is unset, or httpClient is unset when no pipeline was supplied
 */
private CommunicationNetworkingClientImpl createServiceImpl() {
    // Fixed message typo ('ednpoint' -> 'endpoint').
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    HttpPipeline builderPipeline = this.pipeline;
    if (this.pipeline == null) {
        // httpClient is only required when we must build our own pipeline; the
        // original required it unconditionally, breaking the documented
        // "httpClient ... overridden by the pipeline field" contract.
        Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
        // customPolicies is a final field initialized at declaration, so no null check is needed.
        builderPipeline = createHttpPipeline(httpClient,
            createHttpPipelineAuthPolicy(),
            customPolicies);
    }
    CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
    clientBuilder.endpoint(endpoint)
        .pipeline(builderPipeline);
    return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
Both statements check `endpoint` for null — the second is a duplicate with a misleading "'credential'" message and should simply be removed. | public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
Objects.requireNonNull(endpoint, "'credential' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
} | Objects.requireNonNull(endpoint, "'credential' cannot be null."); | public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
This should be done in the build method. Also, don't overwrite the `endpoint` that the user might have already set using the `endpoint()` method above. | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | .credential(new AzureKeyCredential(accessKey)); | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
Objects.requireNonNull(endpoint, "'credential' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
If they provide a Connection String, they shouldn't provide an endpoint before. That's how we are doing it for other languages for network-traversal and Identity. They can provide Connection String or they set endpoint and credential. So it's ok to override it. | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | .credential(new AzureKeyCredential(accessKey)); | public CommunicationRelayClientBuilder connectionString(String connectionString) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_IDENTITY_PROPERTIES =
"azure-communication-networktravesal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
Objects.requireNonNull(endpoint, "'credential' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} | class CommunicationRelayClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES =
"azure-communication-networktraversal.properties";
private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class);
private String endpoint;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private RetryPolicy retryPolicy;
private Configuration configuration;
private ClientOptions clientOptions;
private String connectionString;
private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
/**
* Set endpoint of the service
*
* @param endpoint url of the service
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder endpoint(String endpoint) {
this.endpoint = endpoint;
return this;
}
/**
* Set endpoint of the service
*
* @param pipeline HttpPipeline to use, if a pipeline is not
* supplied, the credential and httpClient fields must be set
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = keyCredential;
return this;
}
/**
* Set endpoint and credential to use
*
* @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential
* @return CommunicationRelayClientBuilder
*/
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline
* field.
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy
* @return CommunicationRelayClientBuilder
*/
public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(customPolicy);
return this;
}
/**
* Sets the client options for all the requests made through the client.
*
* @param clientOptions {@link ClientOptions}.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link CommunicationRelayClientBuilder} object.
*/
public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests.
* @return the updated CommunicationRelayClientBuilder object
*/
public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) {
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayAsyncClient instance
*/
public CommunicationRelayAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return new CommunicationRelayAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return CommunicationRelayClient instance
*/
public CommunicationRelayClient buildClient() {
return new CommunicationRelayClient(buildAsyncClient());
}
private CommunicationNetworkingClientImpl createServiceImpl() {
HttpPipeline builderPipeline = this.pipeline;
if (this.pipeline == null) {
builderPipeline = createHttpPipeline(httpClient,
createHttpPipelineAuthPolicy(),
customPolicies);
}
CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder();
clientBuilder.endpoint(endpoint)
.pipeline(builderPipeline);
return clientBuilder.buildClient();
}
private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
}
private HttpPipeline createHttpPipeline(HttpClient httpClient,
HttpPipelinePolicy authorizationPolicy,
List<HttpPipelinePolicy> customPolicies) {
List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
applyRequiredPolicies(policies, authorizationPolicy);
if (customPolicies != null && customPolicies.size() > 0) {
policies.addAll(customPolicies);
}
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.build();
}
private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) {
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policies.add(new RequestIdPolicy());
policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy);
policies.add(new CookiePolicy());
policies.add(authorizationPolicy);
policies.add(new HttpLoggingPolicy(httpLogOptions));
}
} |
```suggestion Objects.requireNonNull(audience, "'audience' can't be null."); ``` #Resolved | public ContainerRegistryClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "audience can't be null");
this.audience = audience;
return this;
} | Objects.requireNonNull(audience, "audience can't be null"); | public ContainerRegistryClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "'audience' can't be null");
this.audience = audience;
return this;
} | class ContainerRegistryClientBuilder {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
private final ClientLogger logger = new ClientLogger(ContainerRegistryClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
public ContainerRegistryClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authenticate REST API calls.
*
* @param credential Azure token credentials used to authenticate HTTP requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryAsyncClient} or {@link ContainerRegistryClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryClientBuilder} object
*/
public ContainerRegistryClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryClient} or {@link ContainerRegistryAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryClientBuilder object.
*/
public ContainerRegistryClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Adds a policy to the set of existing policies.
*
* @param policy The policy for service requests.
* @return The updated ContainerRegistryClientBuilder object.
* @throws NullPointerException If {@code policy} is null.
*/
public ContainerRegistryClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Creates a {@link ContainerRegistryAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryAsyncClient} is created.
* <p>
* If {@link | class ContainerRegistryClientBuilder {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
private final ClientLogger logger = new ClientLogger(ContainerRegistryClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
public ContainerRegistryClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authenticate REST API calls.
*
* @param credential Azure token credentials used to authenticate HTTP requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryAsyncClient} or {@link ContainerRegistryClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryClientBuilder} object
*/
public ContainerRegistryClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryClient} or {@link ContainerRegistryAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryClientBuilder object.
*/
public ContainerRegistryClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Adds a policy to the set of existing policies.
*
* @param policy The policy for service requests.
* @return The updated ContainerRegistryClientBuilder object.
* @throws NullPointerException If {@code policy} is null.
*/
public ContainerRegistryClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Creates a {@link ContainerRegistryAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryAsyncClient} is created.
* <p>
* If {@link |
Were there any discussions on having this default to using the public cloud audience? #Resolved | public ContainerRegistryClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "audience can't be null");
this.audience = audience;
return this;
} | this.audience = audience; | public ContainerRegistryClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "'audience' can't be null");
this.audience = audience;
return this;
} | class ContainerRegistryClientBuilder {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
private final ClientLogger logger = new ClientLogger(ContainerRegistryClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
public ContainerRegistryClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authenticate REST API calls.
*
* @param credential Azure token credentials used to authenticate HTTP requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryAsyncClient} or {@link ContainerRegistryClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryClientBuilder} object
*/
public ContainerRegistryClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryClient} or {@link ContainerRegistryAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryClientBuilder object.
*/
public ContainerRegistryClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Adds a policy to the set of existing policies.
*
* @param policy The policy for service requests.
* @return The updated ContainerRegistryClientBuilder object.
* @throws NullPointerException If {@code policy} is null.
*/
public ContainerRegistryClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Creates a {@link ContainerRegistryAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryAsyncClient} is created.
* <p>
* If {@link | class ContainerRegistryClientBuilder {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
private final ClientLogger logger = new ClientLogger(ContainerRegistryClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
public ContainerRegistryClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authenticate REST API calls.
*
* @param credential Azure token credentials used to authenticate HTTP requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryAsyncClient} or {@link ContainerRegistryClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryClientBuilder} object
*/
public ContainerRegistryClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryClient} or {@link ContainerRegistryAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryClientBuilder object.
*/
public ContainerRegistryClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Adds a policy to the set of existing policies.
*
* @param policy The policy for service requests.
* @return The updated ContainerRegistryClientBuilder object.
* @throws NullPointerException If {@code policy} is null.
*/
public ContainerRegistryClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Creates a {@link ContainerRegistryAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryAsyncClient} is created.
* <p>
* If {@link |
Yes it was considered and we decided not to support it right now. The default value for the service will change in a few months and that would mean a breaking change for the SDK. To prevent that we decided to not support default in SDK until service changes happen. | public ContainerRegistryClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "audience can't be null");
this.audience = audience;
return this;
} | this.audience = audience; | public ContainerRegistryClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "'audience' can't be null");
this.audience = audience;
return this;
} | class ContainerRegistryClientBuilder {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
private final ClientLogger logger = new ClientLogger(ContainerRegistryClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
public ContainerRegistryClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authenticate REST API calls.
*
* @param credential Azure token credentials used to authenticate HTTP requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryAsyncClient} or {@link ContainerRegistryClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryClientBuilder} object
*/
public ContainerRegistryClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryClient} or {@link ContainerRegistryAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryClientBuilder object.
*/
public ContainerRegistryClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Adds a policy to the set of existing policies.
*
* @param policy The policy for service requests.
* @return The updated ContainerRegistryClientBuilder object.
* @throws NullPointerException If {@code policy} is null.
*/
public ContainerRegistryClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Creates a {@link ContainerRegistryAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryAsyncClient} is created.
* <p>
* If {@link | class ContainerRegistryClientBuilder {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
private final ClientLogger logger = new ClientLogger(ContainerRegistryClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
public ContainerRegistryClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authenticate REST API calls.
*
* @param credential Azure token credentials used to authenticate HTTP requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryAsyncClient} or {@link ContainerRegistryClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryClientBuilder} object
*/
public ContainerRegistryClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
public ContainerRegistryClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
 * Sets the {@link HttpPipelinePolicy} that is used to retry requests.
 * <p>
 * The default retry policy will be used if not provided when {@code buildClient()} or
 * {@code buildAsyncClient()} is called to
 * build {@link ContainerRegistryClient} or {@link ContainerRegistryAsyncClient}.
 *
 * @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
 * {@link RetryPolicy} can be used to retry requests.
 *
 * @return The updated ContainerRegistryClientBuilder object.
 */
public ContainerRegistryClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
 * Adds a policy to the set of existing policies.
 *
 * <p>Policies whose {@link HttpPipelinePosition} is {@code PER_CALL} are collected separately
 * from all other policies, which are kept in the per-retry bucket.</p>
 *
 * @param policy The policy for service requests.
 * @return The updated ContainerRegistryClientBuilder object.
 * @throws NullPointerException If {@code policy} is null.
 */
public ContainerRegistryClientBuilder addPolicy(HttpPipelinePolicy policy) {
    Objects.requireNonNull(policy, "'policy' cannot be null.");
    // Select the bucket matching the policy's declared pipeline position, then store it.
    ((policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) ? perCallPolicies : perRetryPolicies)
        .add(policy);
    return this;
}
/**
* Creates a {@link ContainerRegistryAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryAsyncClient} is created.
* <p>
* If {@link |
nit: just make all the formats additional `append` calls | private String formatHelpString() {
return new StringBuilder()
.append("Package versions: ")
.append(String.format("%s=%s, ", ANNOTATIONS_PACKAGE_NAME, annotationsVersion.getVersionString()))
.append(String.format("%s=%s, ", CORE_PACKAGE_NAME, coreVersion.getVersionString()))
.append(String.format("%s=%s, ", DATABIND_PACKAGE_NAME, databindVersion.getVersionString()))
.append(String.format("%s=%s, ", XML_PACKAGE_NAME, xmlVersion.getVersionString()))
.append(String.format("%s=%s, ", JSR310_PACKAGE_NAME, jsr310Version.getVersionString()))
.append(String.format("azure-core=%s, ", AZURE_CORE_VERSION))
.append(String.format("critical errors found: %b", criticalErrorDetected))
.toString();
} | .append(String.format("%s=%s, ", ANNOTATIONS_PACKAGE_NAME, annotationsVersion.getVersionString())) | private String formatHelpString() {
return new StringBuilder()
.append("Package versions: ")
.append(ANNOTATIONS_PACKAGE_NAME)
.append("=")
.append(annotationsVersion.getVersionString())
.append(", ")
.append(CORE_PACKAGE_NAME)
.append("=")
.append(coreVersion.getVersionString())
.append(", ")
.append(DATABIND_PACKAGE_NAME)
.append("=")
.append(databindVersion.getVersionString())
.append(", ")
.append(XML_PACKAGE_NAME)
.append("=")
.append(xmlVersion.getVersionString())
.append(", ")
.append(JSR310_PACKAGE_NAME)
.append("=")
.append(jsr310Version.getVersionString())
.append(", ")
.append("azure-core=")
.append(AZURE_CORE_VERSION)
.toString();
} | class " + className, e);
return SemanticVersion.createInvalid();
}
}
private static JacksonVersion instance = null;
public static synchronized JacksonVersion getInstance() {
if (instance == null) {
instance = new JacksonVersion();
}
return instance;
} | class JacksonVersion {
private SemanticVersion annotationsVersion;
private SemanticVersion coreVersion;
private SemanticVersion databindVersion;
private SemanticVersion xmlVersion;
private SemanticVersion jsr310Version;
private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations";
private static final String CORE_PACKAGE_NAME = "jackson-core";
private static final String DATABIND_PACKAGE_NAME = "jackson-databind";
private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml";
private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310";
private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0");
private static final SemanticVersion MAX_SUPPORTED_VERSION = SemanticVersion.parse("2.12.4");
private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties";
private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version";
private static final String AZURE_CORE_VERSION = CoreUtils
.getProperties(AZURE_CORE_PROPERTIES_NAME)
.getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION);
private static JacksonVersion instance = null;
private final String helpString;
private final ClientLogger logger = new ClientLogger(JacksonVersion.class);
/**
 * Resolves the versions of the Jackson packages found on the classpath (via a representative
 * class from each package), validates each against the supported range, and caches a
 * diagnostic summary string which is also logged at info level.
 */
private JacksonVersion() {
annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty");
coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator");
databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper");
xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper");
jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule");
checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME);
checkVersion(coreVersion, CORE_PACKAGE_NAME);
checkVersion(databindVersion, DATABIND_PACKAGE_NAME);
checkVersion(xmlVersion, XML_PACKAGE_NAME);
checkVersion(jsr310Version, JSR310_PACKAGE_NAME);
helpString = formatHelpString();
logger.info(helpString);
}
/**
 * Returns help info containing actual detected package versions.
 *
 * <p>The string is computed once in the constructor and cached, so this accessor is cheap.</p>
 *
 * @return diagnostics information with detected versions.
 */
public String getHelpInfo() {
return helpString;
}
/**
 * Gets {@code JacksonVersion} instance singleton.
 *
 * <p>The instance is created lazily; the {@code synchronized} modifier makes the
 * check-then-create sequence safe when invoked concurrently.</p>
 *
 * @return the shared {@code JacksonVersion} instance.
 */
public static synchronized JacksonVersion getInstance() {
if (instance == null) {
instance = new JacksonVersion();
}
return instance;
}
/**
 * Checks a resolved package version against the supported range and logs any issues detected.
 *
 * @param version the detected package version; may be invalid when detection failed.
 * @param packageName the display name of the package, used in log messages.
 */
private void checkVersion(SemanticVersion version, String packageName) {
    if (!version.isValid()) {
        // Detection failed: comparing an invalid version against the supported range
        // would only emit misleading error logs, so warn once and stop here.
        logger.warning("Could not find version of '{}'.", packageName);
        return;
    }
    if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) {
        logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION);
    }
    if (version.getMajorVersion() > MAX_SUPPORTED_VERSION.getMajorVersion()) {
        logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
            version.getVersionString(),
            packageName,
            MAX_SUPPORTED_VERSION.getVersionString());
    }
}
/**
* Generates help information with versions detected in runtime.
*/
} |
Maybe we can throw an exception here. The caller of this method may not realize that the returned value is invalid or has to compare the returned value to check if it's invalid. | public static SemanticVersion getPackageVersionForClass(String className) {
Objects.requireNonNull(className, "'className' cannot be null.");
try {
return SemanticVersion.getPackageVersion(Class.forName(className));
} catch (ClassNotFoundException e) {
return createInvalid();
}
} | return createInvalid(); | public static SemanticVersion getPackageVersionForClass(String className) {
try {
return getPackageVersion(Class.forName(className));
} catch (Throwable e) {
return SemanticVersion.createInvalid();
}
} | class name to get package version of.
* @return parsed {@link SemanticVersion} | class name to get package version of.
* @return parsed {@link SemanticVersion} |
Unfortunately, not all dependencies follow the strict rules of semver for the part after patch version. For e.g. [reactor-core](https://mvnrepository.com/artifact/io.projectreactor/reactor-core/3.3.19.RELEASE) used to have `major.minor.patch.release`. So, the patch version may not always be followed by a `-`. Even Jackson sometimes uses this type of versioning - https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-core/2.11.0.rc1 | public static SemanticVersion parse(String version) {
Objects.requireNonNull(version, "'version' cannot be null.");
String[] parts = version.split("\\.");
if (parts.length < 3) {
return createInvalid(version);
}
int majorDotIdx = version.indexOf('.');
int minorDotIdx = version.indexOf('.', majorDotIdx + 1);
if (majorDotIdx < 0 || minorDotIdx < 0) {
return createInvalid(version);
}
int patchEndIdx = version.indexOf('-', minorDotIdx + 1);
int extEndIdx = version.indexOf('+', minorDotIdx + 1);
if (patchEndIdx < 0) {
patchEndIdx = version.length();
}
if (extEndIdx < 0) {
extEndIdx = version.length();
}
patchEndIdx = Math.min(patchEndIdx, extEndIdx);
try {
Integer major = Integer.valueOf(version.substring(0, majorDotIdx));
Integer minor = Integer.valueOf(version.substring(majorDotIdx + 1, minorDotIdx));
Integer patch = Integer.valueOf(version.substring(minorDotIdx + 1, patchEndIdx));
return new SemanticVersion(major, minor, patch, version.substring(patchEndIdx, extEndIdx), version);
} catch (Throwable ex) {
return createInvalid(version);
}
} | int patchEndIdx = version.indexOf('-', minorDotIdx + 1); | public static SemanticVersion parse(String version) {
Objects.requireNonNull(version, "'version' cannot be null.");
String[] parts = version.split("\\.");
if (parts.length < 3) {
return createInvalid(version);
}
int majorDotIdx = version.indexOf('.');
int minorDotIdx = version.indexOf('.', majorDotIdx + 1);
if (majorDotIdx < 0 || minorDotIdx < 0) {
return createInvalid(version);
}
int patchEndIdx = minorDotIdx + 1;
while (patchEndIdx < version.length()) {
Character ch = version.charAt(patchEndIdx);
if (ch == '.' || ch == '-' || ch == '+') {
break;
}
patchEndIdx++;
}
int extEndIdx = version.indexOf('+', patchEndIdx);
if (extEndIdx < 0) {
extEndIdx = version.length();
}
try {
Integer major = Integer.valueOf(version.substring(0, majorDotIdx));
Integer minor = Integer.valueOf(version.substring(majorDotIdx + 1, minorDotIdx));
Integer patch = Integer.valueOf(version.substring(minorDotIdx + 1, patchEndIdx));
String prerelease = (patchEndIdx == extEndIdx) ? "" : version.substring(patchEndIdx + 1, extEndIdx);
return new SemanticVersion(major, minor, patch, prerelease, version);
} catch (Throwable ex) {
return createInvalid(version);
}
} | class name to get package version of.
* @return parsed {@link SemanticVersion} | class name to get package version of.
* @return parsed {@link SemanticVersion} |
We have `CoreUtils.isNullOrEmpty()` utility method that can be used here. | public int compareTo(SemanticVersion other) {
if (this == other) {
return 0;
}
if (other == null) {
return -1;
}
if (major != other.major) {
return major > other.major ? 1 : -1;
}
if (minor != other.minor) {
return minor > other.minor ? 1 : -1;
}
if (patch != other.patch) {
return patch > other.patch ? 1 : -1;
}
if (isStringNullOrEmpty(prerelease)) {
return isStringNullOrEmpty(other.prerelease) ? 0 : 1;
}
if (isStringNullOrEmpty(other.prerelease)) {
return -1;
}
return prerelease.compareTo(other.prerelease);
} | if (isStringNullOrEmpty(prerelease)) { | public int compareTo(SemanticVersion other) {
if (this == other) {
return 0;
}
if (other == null) {
return -1;
}
if (major != other.major) {
return major > other.major ? 1 : -1;
}
if (minor != other.minor) {
return minor > other.minor ? 1 : -1;
}
if (patch != other.patch) {
return patch > other.patch ? 1 : -1;
}
if (CoreUtils.isNullOrEmpty(prerelease)) {
return CoreUtils.isNullOrEmpty(other.prerelease) ? 0 : 1;
}
if (CoreUtils.isNullOrEmpty(other.prerelease)) {
return -1;
}
return prerelease.compareTo(other.prerelease);
} | class to get package version of.
* @return parsed {@link SemanticVersion} | class to get package version of.
* @return parsed {@link SemanticVersion} |
shouldn't this have a return below it? and not sure if this needs to be a warning, just because you can't figure out the version doesn't mean you need to spam my logs 😢 | private void checkVersion(SemanticVersion version, String packageName) {
if (!version.isValid()) {
logger.warning("Could not find version of '{}'.", packageName);
}
if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) {
logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION);
}
if (version.getMajorVersion() > MAX_SUPPORTED_VERSION.getMajorVersion()) {
logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
version.getVersionString(),
packageName,
MAX_SUPPORTED_VERSION.getVersionString());
}
} | logger.warning("Could not find version of '{}'.", packageName); | private void checkVersion(SemanticVersion version, String packageName) {
if (!version.isValid()) {
logger.warning("Could not find version of '{}'.", packageName);
}
if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) {
logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION);
}
if (version.getMajorVersion() > MAX_SUPPORTED_VERSION.getMajorVersion()) {
logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
version.getVersionString(),
packageName,
MAX_SUPPORTED_VERSION.getVersionString());
}
} | class JacksonVersion {
private SemanticVersion annotationsVersion;
private SemanticVersion coreVersion;
private SemanticVersion databindVersion;
private SemanticVersion xmlVersion;
private SemanticVersion jsr310Version;
private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations";
private static final String CORE_PACKAGE_NAME = "jackson-core";
private static final String DATABIND_PACKAGE_NAME = "jackson-databind";
private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml";
private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310";
private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0");
private static final SemanticVersion MAX_SUPPORTED_VERSION = SemanticVersion.parse("2.12.4");
private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties";
private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version";
private static final String AZURE_CORE_VERSION = CoreUtils
.getProperties(AZURE_CORE_PROPERTIES_NAME)
.getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION);
private static JacksonVersion instance = null;
private final String helpString;
private final ClientLogger logger = new ClientLogger(JacksonVersion.class);
private JacksonVersion() {
annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty");
coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator");
databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper");
xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper");
jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule");
checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME);
checkVersion(coreVersion, CORE_PACKAGE_NAME);
checkVersion(databindVersion, DATABIND_PACKAGE_NAME);
checkVersion(xmlVersion, XML_PACKAGE_NAME);
checkVersion(jsr310Version, JSR310_PACKAGE_NAME);
helpString = formatHelpString();
logger.info(helpString);
}
/**
* Returns help info containing actual detected package versions.
*
* @return diagnostics information with detected versions.
*/
public String getHelpInfo() {
return helpString;
}
/**
* Gets {@code JacksonVersion} instance singleton.
*/
public static synchronized JacksonVersion getInstance() {
if (instance == null) {
instance = new JacksonVersion();
}
return instance;
}
/**
* Checks package version and logs if any issues detected.
*/
/**
 * Generates help information with versions detected in runtime.
 *
 * @return a single diagnostic line listing each Jackson package as {@code name=version},
 *         followed by the azure-core version.
 */
private String formatHelpString() {
    // Plain "name=version" concatenation; the compiler folds this chain into a single
    // StringBuilder sequence, so the produced string matches the explicit builder form.
    return "Package versions: "
        + ANNOTATIONS_PACKAGE_NAME + "=" + annotationsVersion.getVersionString() + ", "
        + CORE_PACKAGE_NAME + "=" + coreVersion.getVersionString() + ", "
        + DATABIND_PACKAGE_NAME + "=" + databindVersion.getVersionString() + ", "
        + XML_PACKAGE_NAME + "=" + xmlVersion.getVersionString() + ", "
        + JSR310_PACKAGE_NAME + "=" + jsr310Version.getVersionString() + ", "
        + "azure-core=" + AZURE_CORE_VERSION;
}
} | class JacksonVersion {
private SemanticVersion annotationsVersion;
private SemanticVersion coreVersion;
private SemanticVersion databindVersion;
private SemanticVersion xmlVersion;
private SemanticVersion jsr310Version;
private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations";
private static final String CORE_PACKAGE_NAME = "jackson-core";
private static final String DATABIND_PACKAGE_NAME = "jackson-databind";
private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml";
private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310";
private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0");
private static final SemanticVersion MAX_SUPPORTED_VERSION = SemanticVersion.parse("2.12.4");
private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties";
private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version";
private static final String AZURE_CORE_VERSION = CoreUtils
.getProperties(AZURE_CORE_PROPERTIES_NAME)
.getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION);
private static JacksonVersion instance = null;
private final String helpString;
private final ClientLogger logger = new ClientLogger(JacksonVersion.class);
private JacksonVersion() {
annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty");
coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator");
databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper");
xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper");
jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule");
checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME);
checkVersion(coreVersion, CORE_PACKAGE_NAME);
checkVersion(databindVersion, DATABIND_PACKAGE_NAME);
checkVersion(xmlVersion, XML_PACKAGE_NAME);
checkVersion(jsr310Version, JSR310_PACKAGE_NAME);
helpString = formatHelpString();
logger.info(helpString);
}
/**
* Returns help info containing actual detected package versions.
*
* @return diagnostics information with detected versions.
*/
public String getHelpInfo() {
return helpString;
}
/**
* Gets {@code JacksonVersion} instance singleton.
*/
public static synchronized JacksonVersion getInstance() {
if (instance == null) {
instance = new JacksonVersion();
}
return instance;
}
/**
* Checks package version and logs if any issues detected.
*/
/**
* Generates help information with versions detected in runtime.
*/
private String formatHelpString() {
return new StringBuilder()
.append("Package versions: ")
.append(ANNOTATIONS_PACKAGE_NAME)
.append("=")
.append(annotationsVersion.getVersionString())
.append(", ")
.append(CORE_PACKAGE_NAME)
.append("=")
.append(coreVersion.getVersionString())
.append(", ")
.append(DATABIND_PACKAGE_NAME)
.append("=")
.append(databindVersion.getVersionString())
.append(", ")
.append(XML_PACKAGE_NAME)
.append("=")
.append(xmlVersion.getVersionString())
.append(", ")
.append(JSR310_PACKAGE_NAME)
.append("=")
.append(jsr310Version.getVersionString())
.append(", ")
.append("azure-core=")
.append(AZURE_CORE_VERSION)
.toString();
}
} |
https://github.com/Azure/azure-sdk-for-java/pull/24842, https://github.com/Azure/azure-sdk-for-java/pull/24843 | private void checkVersion(SemanticVersion version, String packageName) {
if (!version.isValid()) {
logger.warning("Could not find version of '{}'.", packageName);
}
if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) {
logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION);
}
if (version.getMajorVersion() > MAX_SUPPORTED_VERSION.getMajorVersion()) {
logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
version.getVersionString(),
packageName,
MAX_SUPPORTED_VERSION.getVersionString());
}
} | logger.warning("Could not find version of '{}'.", packageName); | private void checkVersion(SemanticVersion version, String packageName) {
if (!version.isValid()) {
logger.warning("Could not find version of '{}'.", packageName);
}
if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) {
logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION);
}
if (version.getMajorVersion() > MAX_SUPPORTED_VERSION.getMajorVersion()) {
logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
version.getVersionString(),
packageName,
MAX_SUPPORTED_VERSION.getVersionString());
}
} | class JacksonVersion {
private SemanticVersion annotationsVersion;
private SemanticVersion coreVersion;
private SemanticVersion databindVersion;
private SemanticVersion xmlVersion;
private SemanticVersion jsr310Version;
private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations";
private static final String CORE_PACKAGE_NAME = "jackson-core";
private static final String DATABIND_PACKAGE_NAME = "jackson-databind";
private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml";
private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310";
private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0");
private static final SemanticVersion MAX_SUPPORTED_VERSION = SemanticVersion.parse("2.12.4");
private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties";
private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version";
private static final String AZURE_CORE_VERSION = CoreUtils
.getProperties(AZURE_CORE_PROPERTIES_NAME)
.getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION);
private static JacksonVersion instance = null;
private final String helpString;
private final ClientLogger logger = new ClientLogger(JacksonVersion.class);
private JacksonVersion() {
annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty");
coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator");
databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper");
xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper");
jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule");
checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME);
checkVersion(coreVersion, CORE_PACKAGE_NAME);
checkVersion(databindVersion, DATABIND_PACKAGE_NAME);
checkVersion(xmlVersion, XML_PACKAGE_NAME);
checkVersion(jsr310Version, JSR310_PACKAGE_NAME);
helpString = formatHelpString();
logger.info(helpString);
}
/**
* Returns help info containing actual detected package versions.
*
* @return diagnostics information with detected versions.
*/
public String getHelpInfo() {
return helpString;
}
/**
* Gets {@code JacksonVersion} instance singleton.
*/
public static synchronized JacksonVersion getInstance() {
if (instance == null) {
instance = new JacksonVersion();
}
return instance;
}
/**
* Checks package version and logs if any issues detected.
*/
/**
* Generates help information with versions detected in runtime.
*/
private String formatHelpString() {
return new StringBuilder()
.append("Package versions: ")
.append(ANNOTATIONS_PACKAGE_NAME)
.append("=")
.append(annotationsVersion.getVersionString())
.append(", ")
.append(CORE_PACKAGE_NAME)
.append("=")
.append(coreVersion.getVersionString())
.append(", ")
.append(DATABIND_PACKAGE_NAME)
.append("=")
.append(databindVersion.getVersionString())
.append(", ")
.append(XML_PACKAGE_NAME)
.append("=")
.append(xmlVersion.getVersionString())
.append(", ")
.append(JSR310_PACKAGE_NAME)
.append("=")
.append(jsr310Version.getVersionString())
.append(", ")
.append("azure-core=")
.append(AZURE_CORE_VERSION)
.toString();
}
} | class JacksonVersion {
private SemanticVersion annotationsVersion;
private SemanticVersion coreVersion;
private SemanticVersion databindVersion;
private SemanticVersion xmlVersion;
private SemanticVersion jsr310Version;
private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations";
private static final String CORE_PACKAGE_NAME = "jackson-core";
private static final String DATABIND_PACKAGE_NAME = "jackson-databind";
private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml";
private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310";
private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0");
private static final SemanticVersion MAX_SUPPORTED_VERSION = SemanticVersion.parse("2.12.4");
private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties";
private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version";
private static final String AZURE_CORE_VERSION = CoreUtils
.getProperties(AZURE_CORE_PROPERTIES_NAME)
.getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION);
private static JacksonVersion instance = null;
private final String helpString;
private final ClientLogger logger = new ClientLogger(JacksonVersion.class);
private JacksonVersion() {
annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty");
coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator");
databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper");
xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper");
jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule");
checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME);
checkVersion(coreVersion, CORE_PACKAGE_NAME);
checkVersion(databindVersion, DATABIND_PACKAGE_NAME);
checkVersion(xmlVersion, XML_PACKAGE_NAME);
checkVersion(jsr310Version, JSR310_PACKAGE_NAME);
helpString = formatHelpString();
logger.info(helpString);
}
/**
* Returns help info containing actual detected package versions.
*
* @return diagnostics information with detected versions.
*/
public String getHelpInfo() {
return helpString;
}
/**
* Gets {@code JacksonVersion} instance singleton.
*/
public static synchronized JacksonVersion getInstance() {
if (instance == null) {
instance = new JacksonVersion();
}
return instance;
}
/**
* Checks package version and logs if any issues detected.
*/
/**
* Generates help information with versions detected in runtime.
*/
private String formatHelpString() {
return new StringBuilder()
.append("Package versions: ")
.append(ANNOTATIONS_PACKAGE_NAME)
.append("=")
.append(annotationsVersion.getVersionString())
.append(", ")
.append(CORE_PACKAGE_NAME)
.append("=")
.append(coreVersion.getVersionString())
.append(", ")
.append(DATABIND_PACKAGE_NAME)
.append("=")
.append(databindVersion.getVersionString())
.append(", ")
.append(XML_PACKAGE_NAME)
.append("=")
.append(xmlVersion.getVersionString())
.append(", ")
.append(JSR310_PACKAGE_NAME)
.append("=")
.append(jsr310Version.getVersionString())
.append(", ")
.append("azure-core=")
.append(AZURE_CORE_VERSION)
.toString();
}
} |
Is it valid for outbound rule to have null frontend or backend? | protected void beforeCreating() {
if (this.creatablePIPKeys != null) {
for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
if (pip != null) {
withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
}
}
this.creatablePIPKeys.clear();
}
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
SubResource ref = natRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natRule.innerModel().withFrontendIpConfiguration(null);
}
}
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
SubResource ref = natPool.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natPool.innerModel().withFrontendIpConfiguration(null);
}
}
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
if (refs != null && !refs.isEmpty()) {
List<SubResource> existingFrontendIpConfigurations =
refs.stream()
.filter(ref ->
this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
)
.collect(Collectors.toList());
existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
}
SubResource ref = outboundRule.innerModel().backendAddressPool();
if(ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
outboundRule.innerModel().withBackendAddressPool(null);
}
}
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
SubResource ref;
ref = lbRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withFrontendIpConfiguration(null);
}
ref = lbRule.innerModel().backendAddressPool();
if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withBackendAddressPool(null);
}
ref = lbRule.innerModel().probe();
if (ref != null
&& !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withProbe(null);
}
}
} | } | protected void beforeCreating() {
// Flushes the locally-cached child wrappers into the inner model right before
// the create/update call, and prunes child references (frontends, backends,
// probes) that no longer resolve to an existing child.
if (this.creatablePIPKeys != null) {
    // Bind public IPs that were created as dependency tasks to the frontends
    // they were requested for; taskResult() retrieves the created resource.
    for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
        PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
        if (pip != null) {
            withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
        }
    }
    this.creatablePIPKeys.clear();
}
// Probes: HTTP, HTTPS and TCP probe wrappers are merged into one inner list.
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
    innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
// Backends.
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
    innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
// Frontends.
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
    innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
// Inbound NAT rules; drop frontend refs that point at removed frontends.
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
    innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
    SubResource ref = natRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natRule.innerModel().withFrontendIpConfiguration(null);
    }
}
// Inbound NAT pools; same pruning of dangling frontend refs.
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
    innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
    SubResource ref = natPool.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natPool.innerModel().withFrontendIpConfiguration(null);
    }
}
// Outbound rules; keep only frontend refs that still resolve (null when none
// survive), and drop a backend ref that no longer resolves.
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
    innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
    List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
    if (refs != null && !refs.isEmpty()) {
        List<SubResource> existingFrontendIpConfigurations =
            refs.stream()
                .filter(ref ->
                    this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
                )
                .collect(Collectors.toList());
        existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
        outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
    }
    SubResource ref = outboundRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        outboundRule.innerModel().withBackendAddressPool(null);
    }
}
// Load balancing rules; prune dangling frontend, backend and probe refs.
// A probe ref is valid if it resolves in ANY of the three probe maps.
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
    innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
    SubResource ref;
    ref = lbRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withFrontendIpConfiguration(null);
    }
    ref = lbRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withBackendAddressPool(null);
    }
    ref = lbRule.innerModel().probe();
    if (ref != null
        && !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withProbe(null);
    }
}
}
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
// Logger bound to the concrete implementation class.
private final ClientLogger logger = new ClientLogger(getClass());
// Primary-NIC id -> backend name associations, applied in afterCreatingAsync().
private final Map<String, String> nicsInBackends = new HashMap<>();
// Creatable public-IP dependency key -> frontend name to bind it to in beforeCreating().
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
// Child wrappers keyed by child name; rebuilt from the inner model by the
// initialize*FromInner() methods.
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
    super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
    // After a refresh the child wrapper maps must be rebuilt from the new inner model.
    return super
        .refreshAsync()
        .map(
            loadBalancer -> {
                LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
                impl.initializeChildrenFromInner();
                return impl;
            });
}
// Fetches the current inner model from the service.
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
    return this
        .manager()
        .serviceClient()
        .getLoadBalancers()
        .getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
// Pushes only the tags of the local inner model to the service.
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
    return this
        .manager()
        .serviceClient()
        .getLoadBalancers()
        .updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
// Rebuilds every child wrapper map from the current inner model.
@Override
protected void initializeChildrenFromInner() {
    initializeFrontendsFromInner();
    initializeProbesFromInner();
    initializeBackendsFromInner();
    initializeLoadBalancingRulesFromInner();
    initializeInboundNatRulesFromInner();
    initializeInboundNatPoolsFromInner();
    initializeOutboundRulesFromInner();
}
// Defines and attaches a backend with a randomly generated name.
protected LoadBalancerBackendImpl ensureUniqueBackend() {
    String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
    LoadBalancerBackendImpl backend = this.defineBackend(name);
    backend.attach();
    return backend;
}
// Returns a SubResource reference to the named frontend, creating (and
// attaching) the frontend first; a null name yields a uniquely named one.
protected SubResource ensureFrontendRef(String name) {
    LoadBalancerFrontendImpl frontend;
    if (name == null) {
        frontend = this.ensureUniqueFrontend();
    } else {
        frontend = this.defineFrontend(name);
        frontend.attach();
    }
    return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
// Defines and attaches a frontend with a randomly generated name.
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
    String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
    LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
    frontend.attach();
    return frontend;
}
/**
 * Looks up an existing private frontend bound to the given subnet, comparing
 * network id and subnet name case-insensitively.
 * Returns null when either argument is null or no frontend matches.
 */
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
    if (networkId == null || subnetName == null) {
        return null;
    }
    for (LoadBalancerPrivateFrontend candidate : this.privateFrontends().values()) {
        // equalsIgnoreCase(null) is false, so candidates lacking either value are skipped.
        if (networkId.equalsIgnoreCase(candidate.networkId())
            && subnetName.equalsIgnoreCase(candidate.subnetName())) {
            return candidate;
        }
    }
    return null;
}
/**
 * Returns an existing private frontend attached to the given subnet, or defines
 * and attaches a new one with a dynamic private IP if none matches.
 *
 * @param networkId resource id of the virtual network; null yields null
 * @param subnetName name of the subnet within the network; null yields null
 * @return the matching or newly attached private frontend, or null on null input
 */
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Validate the inputs first so we do not perform a pointless lookup on nulls
    // (the original looked up before checking, wasting a scan of the frontends).
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    fe.attach();
    return fe;
}
/**
 * Returns an existing public frontend referencing the given public IP, or
 * defines and attaches a new one if none does.
 *
 * @param pipId resource id of the public IP address; null yields null
 * @return the matching or newly attached public frontend, or null on null input
 */
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    // Check for null before scanning the frontends — the original performed the
    // lookup first and only then tested the argument.
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    fe.attach();
    return fe;
}
// After the load balancer is created, adds each registered primary NIC IP
// configuration to its associated backend. Errors are collected per NIC and
// surfaced together at the end; the association map is cleared only on success.
@Override
protected Mono<Void> afterCreatingAsync() {
    if (this.nicsInBackends != null) {
        List<Throwable> nicExceptions = new ArrayList<>();
        return Flux
            .fromIterable(this.nicsInBackends.entrySet())
            .flatMap(
                nicInBackend -> {
                    String nicId = nicInBackend.getKey();
                    String backendName = nicInBackend.getValue();
                    return this
                        .manager()
                        .networkInterfaces()
                        .getByIdAsync(nicId)
                        .flatMap(
                            nic -> {
                                // Attach the NIC's primary IP configuration to the backend.
                                NicIpConfiguration nicIP = nic.primaryIPConfiguration();
                                return nic
                                    .update()
                                    .updateIPConfiguration(nicIP.name())
                                    .withExistingLoadBalancerBackend(this, backendName)
                                    .parent()
                                    .applyAsync();
                            });
                })
            .onErrorResume(
                t -> {
                    // Record the failure and keep processing the remaining NICs.
                    nicExceptions.add(t);
                    return Mono.empty();
                })
            .then(
                Mono
                    .defer(
                        () -> {
                            if (!nicExceptions.isEmpty()) {
                                return Mono.error(Exceptions.multiple(nicExceptions));
                            } else {
                                this.nicsInBackends.clear();
                                return Mono.empty();
                            }
                        }));
    }
    return Mono.empty();
}
// Issues the create-or-update call with the locally prepared inner model.
@Override
protected Mono<LoadBalancerInner> createInner() {
    return this
        .manager()
        .serviceClient()
        .getLoadBalancers()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
// Full create flow: serialize children (beforeCreating), create, re-initialize
// the child wrappers, run post-create work, then refresh the final state.
@Override
public Mono<LoadBalancer> createResourceAsync() {
    beforeCreating();
    return createInner()
        .flatMap(
            inner -> {
                setInner(inner);
                initializeChildrenFromInner();
                return afterCreatingAsync().then(this.refreshAsync());
            });
}
/** Rebuilds the frontend wrapper map from the inner model's frontend IP configurations. */
private void initializeFrontendsFromInner() {
    this.frontends = new TreeMap<>();
    List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
    if (frontendsInner != null) {
        frontendsInner.forEach(
            frontendInner ->
                this.frontends.put(frontendInner.name(), new LoadBalancerFrontendImpl(frontendInner, this)));
    }
}
/** Rebuilds the backend wrapper map from the inner model's backend address pools. */
private void initializeBackendsFromInner() {
    this.backends = new TreeMap<>();
    List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
    if (backendsInner != null) {
        backendsInner.forEach(
            backendInner ->
                this.backends.put(backendInner.name(), new LoadBalancerBackendImpl(backendInner, this)));
    }
}
/**
 * Rebuilds the three probe wrapper maps (TCP/HTTP/HTTPS) from the inner model,
 * routing each probe by its protocol. Probes with an unrecognized protocol are
 * silently skipped, as before.
 */
private void initializeProbesFromInner() {
    this.httpProbes = new TreeMap<>();
    this.httpsProbes = new TreeMap<>();
    this.tcpProbes = new TreeMap<>();
    if (this.innerModel().probes() != null) {
        for (ProbeInner probeInner : this.innerModel().probes()) {
            LoadBalancerProbeImpl probe = new LoadBalancerProbeImpl(probeInner, this);
            // Compare constant-first so a probe whose protocol is null is
            // skipped instead of throwing a NullPointerException (the original
            // dereferenced probeInner.protocol() for the equals call).
            if (ProbeProtocol.TCP.equals(probeInner.protocol())) {
                this.tcpProbes.put(probeInner.name(), probe);
            } else if (ProbeProtocol.HTTP.equals(probeInner.protocol())) {
                this.httpProbes.put(probeInner.name(), probe);
            } else if (ProbeProtocol.HTTPS.equals(probeInner.protocol())) {
                this.httpsProbes.put(probeInner.name(), probe);
            }
        }
    }
}
// Rebuilds the load-balancing-rule wrapper map from the inner model.
private void initializeLoadBalancingRulesFromInner() {
    this.loadBalancingRules = new TreeMap<>();
    List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
    if (rulesInner != null) {
        for (LoadBalancingRuleInner ruleInner : rulesInner) {
            LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
            this.loadBalancingRules.put(ruleInner.name(), rule);
        }
    }
}
// Rebuilds the inbound-NAT-pool wrapper map from the inner model.
private void initializeInboundNatPoolsFromInner() {
    this.inboundNatPools = new TreeMap<>();
    List<InboundNatPool> inners = this.innerModel().inboundNatPools();
    if (inners != null) {
        for (InboundNatPool inner : inners) {
            LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
            this.inboundNatPools.put(wrapper.name(), wrapper);
        }
    }
}
// Rebuilds the inbound-NAT-rule wrapper map from the inner model.
private void initializeInboundNatRulesFromInner() {
    this.inboundNatRules = new TreeMap<>();
    List<InboundNatRuleInner> rulesInner = this.innerModel().inboundNatRules();
    if (rulesInner != null) {
        for (InboundNatRuleInner ruleInner : rulesInner) {
            LoadBalancerInboundNatRuleImpl rule = new LoadBalancerInboundNatRuleImpl(ruleInner, this);
            this.inboundNatRules.put(ruleInner.name(), rule);
        }
    }
}
// Rebuilds the outbound-rule wrapper map from the inner model.
private void initializeOutboundRulesFromInner() {
    this.outboundRules = new TreeMap<>();
    List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
    if (rulesInner != null) {
        for (OutboundRuleInner ruleInner : rulesInner) {
            LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
            this.outboundRules.put(ruleInner.name(), rule);
        }
    }
}
/** Computes the resource id this load balancer will have once created. */
String futureResourceId() {
    // Simple one-shot concatenation; a StringBuilder adds nothing here.
    return super.resourceIdBase() + "/providers/Microsoft.Network/loadBalancers/" + this.name();
}
// Registers a frontend wrapper under its name; ignores null.
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
    if (frontend != null) {
        this.frontends.put(frontend.name(), frontend);
    }
    return this;
}
// Registers a probe wrapper in the map matching its protocol; ignores null and
// unrecognized protocols.
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
    if (probe == null) {
        return this;
    } else if (probe.protocol() == ProbeProtocol.HTTP) {
        httpProbes.put(probe.name(), probe);
    } else if (probe.protocol() == ProbeProtocol.HTTPS) {
        httpsProbes.put(probe.name(), probe);
    } else if (probe.protocol() == ProbeProtocol.TCP) {
        tcpProbes.put(probe.name(), probe);
    }
    return this;
}
// Registers a load balancing rule wrapper under its name; ignores null.
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
    if (loadBalancingRule != null) {
        this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
    }
    return this;
}
// Registers an inbound NAT rule wrapper under its name; ignores null.
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
    if (inboundNatRule != null) {
        this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
    }
    return this;
}
// Registers an inbound NAT pool wrapper under its name; ignores null.
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
    if (inboundNatPool != null) {
        this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
    }
    return this;
}
// Registers an outbound rule wrapper under its name; ignores null.
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
    if (outboundRule != null) {
        this.outboundRules.put(outboundRule.name(), outboundRule);
    }
    return this;
}
// Registers a backend wrapper under its name; ignores null.
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
    if (backend != null) {
        this.backends.put(backend.name(), backend);
    }
    return this;
}
// Queues creation of a new public IP (DNS leaf label) in this LB's region and
// resource group (or the creatable group, if one is pending), bound to the
// given frontend.
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
    PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
        manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
    Creatable<PublicIpAddress> creatablePip;
    if (super.creatableGroup == null) {
        creatablePip =
            precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
    } else {
        creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
    }
    return withNewPublicIPAddress(creatablePip, frontendName);
}
// Records a creatable public IP as a dependency and remembers which frontend it
// should be bound to (resolved in beforeCreating()). A PIP definition may only
// ever be associated with one frontend name.
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
    String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
    if (frontendName == null) {
        if (existingPipFrontendName != null) {
            // Reuse the frontend this PIP was already associated with.
            frontendName = existingPipFrontendName;
        } else {
            // No frontend specified: generate a unique one.
            frontendName = ensureUniqueFrontend().name();
        }
    }
    if (existingPipFrontendName == null) {
        this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
    } else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
        String exceptionMessage =
            "This public IP address definition is already associated with a frontend under a different name.";
        throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
    }
    return this;
}
// Binds an existing public IP (by resource id) to the named frontend, or to a
// new uniquely named frontend when no name is given.
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
    if (frontendName == null) {
        return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
    } else {
        return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
    }
}
// Attaches the named backend and remembers the VM's primary NIC so that its IP
// configuration is added to that backend after creation (afterCreatingAsync()).
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
    if (backendName != null) {
        this.defineBackend(backendName).attach();
        if (vm.primaryNetworkInterfaceId() != null) {
            // Backend names are stored lower-cased, locale-independently.
            this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
        }
    }
    return this;
}
// Begins the definition of a TCP probe, or returns the existing one by name.
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
    LoadBalancerProbe probe = this.tcpProbes.get(name);
    if (probe == null) {
        ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
        return new LoadBalancerProbeImpl(inner, this);
    } else {
        return (LoadBalancerProbeImpl) probe;
    }
}
// Begins the definition of an HTTP probe (default port 80), or returns the existing one.
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
    LoadBalancerProbe probe = this.httpProbes.get(name);
    if (probe == null) {
        ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
        return new LoadBalancerProbeImpl(inner, this);
    } else {
        return (LoadBalancerProbeImpl) probe;
    }
}
// Begins the definition of an HTTPS probe (default port 443), or returns the existing one.
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
    LoadBalancerProbe probe = this.httpsProbes.get(name);
    if (probe == null) {
        ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
        return new LoadBalancerProbeImpl(inner, this);
    } else {
        return (LoadBalancerProbeImpl) probe;
    }
}
// Begins the definition of a load balancing rule, or returns the existing one.
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
    LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
    if (lbRule == null) {
        LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
        return new LoadBalancingRuleImpl(inner, this);
    } else {
        return (LoadBalancingRuleImpl) lbRule;
    }
}
// Begins the definition of an inbound NAT rule, or returns the existing one.
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
    LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
    if (natRule == null) {
        InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
        return new LoadBalancerInboundNatRuleImpl(inner, this);
    } else {
        return (LoadBalancerInboundNatRuleImpl) natRule;
    }
}
// Begins the definition of an inbound NAT pool, or returns the existing one.
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
    LoadBalancerInboundNatPool natPool = this.inboundNatPools.get(name);
    if (natPool == null) {
        InboundNatPool inner = new InboundNatPool().withName(name);
        return new LoadBalancerInboundNatPoolImpl(inner, this);
    } else {
        return (LoadBalancerInboundNatPoolImpl) natPool;
    }
}
// Private and public frontends share one definition path (defineFrontend).
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
    return defineFrontend(name);
}
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
    return defineFrontend(name);
}
// Begins the definition of a frontend, or returns the existing one by name.
LoadBalancerFrontendImpl defineFrontend(String name) {
    LoadBalancerFrontend frontend = this.frontends.get(name);
    if (frontend == null) {
        FrontendIpConfigurationInner inner = new FrontendIpConfigurationInner().withName(name);
        return new LoadBalancerFrontendImpl(inner, this);
    } else {
        return (LoadBalancerFrontendImpl) frontend;
    }
}
// Begins the definition of a backend, or returns the existing one by name.
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
    LoadBalancerBackend backend = this.backends.get(name);
    if (backend == null) {
        BackendAddressPoolInner inner = new BackendAddressPoolInner().withName(name);
        return new LoadBalancerBackendImpl(inner, this);
    } else {
        return (LoadBalancerBackendImpl) backend;
    }
}
// Sets the SKU on the inner model.
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
    this.innerModel().withSku(skuType.sku());
    return this;
}
/**
 * Removes the named probe, whichever protocol map it lives in.
 * A probe name is present in at most one map; removal stops at the first hit
 * (HTTP, then HTTPS, then TCP), matching the original contains/remove chain.
 */
@Override
public LoadBalancerImpl withoutProbe(String name) {
    if (this.httpProbes.remove(name) == null && this.httpsProbes.remove(name) == null) {
        this.tcpProbes.remove(name);
    }
    return this;
}
// update*() methods return the existing child wrapper by name for in-place
// modification; they return null (after the cast) when the name is unknown.
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
    return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
    return (LoadBalancerBackendImpl) this.backends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
    return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
    return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
    return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
    return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
    return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
    return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
    return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}
// without*() methods remove the named child from the local map; the removal is
// applied on the service side by the next create/update call.
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
    this.loadBalancingRules.remove(name);
    return this;
}
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
    this.inboundNatRules.remove(name);
    return this;
}
@Override
public LoadBalancerImpl withoutBackend(String name) {
    this.backends.remove(name);
    return this;
}
@Override
public Update withoutInboundNatPool(String name) {
    this.inboundNatPools.remove(name);
    return this;
}
@Override
public LoadBalancerImpl withoutFrontend(String name) {
    this.frontends.remove(name);
    return this;
}
// Read-only views over the child wrapper maps.
@Override
public Map<String, LoadBalancerBackend> backends() {
    return Collections.unmodifiableMap(this.backends);
}
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
    return Collections.unmodifiableMap(this.inboundNatPools);
}
@Override
public LoadBalancerSkuType sku() {
    return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
    return Collections.unmodifiableMap(this.tcpProbes);
}
@Override
public Map<String, LoadBalancerFrontend> frontends() {
    return Collections.unmodifiableMap(this.frontends);
}
// Filters the frontends down to the private (non-public) ones.
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
    Map<String, LoadBalancerPrivateFrontend> privateFrontends = new HashMap<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (!frontend.isPublic()) {
            privateFrontends.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend);
        }
    }
    return Collections.unmodifiableMap(privateFrontends);
}
// Filters the frontends down to the public ones.
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
    Map<String, LoadBalancerPublicFrontend> publicFrontends = new HashMap<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (frontend.isPublic()) {
            publicFrontends.put(frontend.name(), (LoadBalancerPublicFrontend) frontend);
        }
    }
    return Collections.unmodifiableMap(publicFrontends);
}
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
    return Collections.unmodifiableMap(this.inboundNatRules);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
    return Collections.unmodifiableMap(this.httpProbes);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
    return Collections.unmodifiableMap(this.httpsProbes);
}
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
    return Collections.unmodifiableMap(this.loadBalancingRules);
}
// Collects the public IP ids referenced by the public frontends.
@Override
public List<String> publicIpAddressIds() {
    List<String> publicIPAddressIds = new ArrayList<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (frontend.isPublic()) {
            String pipId = ((LoadBalancerPublicFrontend) frontend).publicIpAddressId();
            publicIPAddressIds.add(pipId);
        }
    }
    return Collections.unmodifiableList(publicIPAddressIds);
}
/**
 * Finds the public frontend referencing the given public IP id, comparing
 * case-insensitively; null when the id is null or nothing matches.
 */
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
    if (pipId == null) {
        return null;
    }
    // equalsIgnoreCase(null) is false, so frontends without a PIP id are skipped.
    return this.publicFrontends().values().stream()
        .filter(frontend -> pipId.equalsIgnoreCase(frontend.publicIpAddressId()))
        .findFirst()
        .orElse(null);
}
/** Convenience overload taking the public IP address object itself. */
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
    if (publicIPAddress == null) {
        return null;
    }
    return this.findFrontendByPublicIpAddress(publicIPAddress.id());
}
// Removes the named outbound rule from the local map.
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
    this.outboundRules.remove(name);
    return this;
}
// Begins the definition of an outbound rule, or returns the existing one by name.
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
    LoadBalancerOutboundRule outboundRule = this.outboundRules.get(name);
    if (outboundRule == null) {
        OutboundRuleInner inner = new OutboundRuleInner().withName(name);
        return new LoadBalancerOutboundRuleImpl(inner, this);
    } else {
        return (LoadBalancerOutboundRuleImpl) outboundRule;
    }
}
// Returns the existing outbound rule wrapper for in-place modification.
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
    return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
}
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
// Logger bound to the concrete implementation class.
private final ClientLogger logger = new ClientLogger(getClass());
// Primary-NIC id -> backend name associations, applied in afterCreatingAsync().
private final Map<String, String> nicsInBackends = new HashMap<>();
// Creatable public-IP dependency key -> frontend name to bind it to in beforeCreating().
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
// Child wrappers keyed by child name; rebuilt from the inner model by the
// initialize*FromInner() methods.
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
    super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
    // After a refresh the child wrapper maps must be rebuilt from the new inner model.
    return super
        .refreshAsync()
        .map(
            loadBalancer -> {
                LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
                impl.initializeChildrenFromInner();
                return impl;
            });
}
// Fetches the current inner model from the service.
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
    return this
        .manager()
        .serviceClient()
        .getLoadBalancers()
        .getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
// Pushes only the tags of the local inner model to the service.
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
    return this
        .manager()
        .serviceClient()
        .getLoadBalancers()
        .updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
// Rebuilds every child wrapper map from the current inner model.
@Override
protected void initializeChildrenFromInner() {
    initializeFrontendsFromInner();
    initializeProbesFromInner();
    initializeBackendsFromInner();
    initializeLoadBalancingRulesFromInner();
    initializeInboundNatRulesFromInner();
    initializeInboundNatPoolsFromInner();
    initializeOutboundRulesFromInner();
}
// Defines and attaches a backend with a randomly generated name.
protected LoadBalancerBackendImpl ensureUniqueBackend() {
    String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
    LoadBalancerBackendImpl backend = this.defineBackend(name);
    backend.attach();
    return backend;
}
// Returns a SubResource reference to the named frontend, creating (and
// attaching) the frontend first; a null name yields a uniquely named one.
protected SubResource ensureFrontendRef(String name) {
    LoadBalancerFrontendImpl frontend;
    if (name == null) {
        frontend = this.ensureUniqueFrontend();
    } else {
        frontend = this.defineFrontend(name);
        frontend.attach();
    }
    return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
// Defines and attaches a frontend with a randomly generated name.
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
    String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
    LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
    frontend.attach();
    return frontend;
}
// Looks up an existing private frontend bound to the given subnet, comparing
// network id and subnet name case-insensitively; null when absent or args null.
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
    if (null == networkId || null == subnetName) {
        return null;
    } else {
        for (LoadBalancerPrivateFrontend frontend : this.privateFrontends().values()) {
            if (frontend.networkId() == null || frontend.subnetName() == null) {
                continue;
            } else if (networkId.equalsIgnoreCase(frontend.networkId())
                && subnetName.equalsIgnoreCase(frontend.subnetName())) {
                return frontend;
            }
        }
        return null;
    }
}
/**
 * Returns an existing private frontend attached to the given subnet, or defines
 * and attaches a new one with a dynamic private IP if none matches.
 *
 * @param networkId resource id of the virtual network; null yields null
 * @param subnetName name of the subnet within the network; null yields null
 * @return the matching or newly attached private frontend, or null on null input
 */
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Validate the inputs first so we do not perform a pointless lookup on nulls
    // (the original looked up before checking, wasting a scan of the frontends).
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    fe.attach();
    return fe;
}
/**
 * Returns an existing public frontend referencing the given public IP, or
 * defines and attaches a new one if none does.
 *
 * @param pipId resource id of the public IP address; null yields null
 * @return the matching or newly attached public frontend, or null on null input
 */
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    // Check for null before scanning the frontends — the original performed the
    // lookup first and only then tested the argument.
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    fe.attach();
    return fe;
}
// After the load balancer is created, adds each registered primary NIC IP
// configuration to its associated backend. Errors are collected per NIC and
// surfaced together at the end; the association map is cleared only on success.
@Override
protected Mono<Void> afterCreatingAsync() {
    if (this.nicsInBackends != null) {
        List<Throwable> nicExceptions = new ArrayList<>();
        return Flux
            .fromIterable(this.nicsInBackends.entrySet())
            .flatMap(
                nicInBackend -> {
                    String nicId = nicInBackend.getKey();
                    String backendName = nicInBackend.getValue();
                    return this
                        .manager()
                        .networkInterfaces()
                        .getByIdAsync(nicId)
                        .flatMap(
                            nic -> {
                                // Attach the NIC's primary IP configuration to the backend.
                                NicIpConfiguration nicIP = nic.primaryIPConfiguration();
                                return nic
                                    .update()
                                    .updateIPConfiguration(nicIP.name())
                                    .withExistingLoadBalancerBackend(this, backendName)
                                    .parent()
                                    .applyAsync();
                            });
                })
            .onErrorResume(
                t -> {
                    // Record the failure and keep processing the remaining NICs.
                    nicExceptions.add(t);
                    return Mono.empty();
                })
            .then(
                Mono
                    .defer(
                        () -> {
                            if (!nicExceptions.isEmpty()) {
                                return Mono.error(Exceptions.multiple(nicExceptions));
                            } else {
                                this.nicsInBackends.clear();
                                return Mono.empty();
                            }
                        }));
    }
    return Mono.empty();
}
// Issues the create-or-update call with the locally prepared inner model.
@Override
protected Mono<LoadBalancerInner> createInner() {
    return this
        .manager()
        .serviceClient()
        .getLoadBalancers()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
// Full create flow: serialize children (beforeCreating), create, re-initialize
// the child wrappers, run post-create work, then refresh the final state.
@Override
public Mono<LoadBalancer> createResourceAsync() {
    beforeCreating();
    return createInner()
        .flatMap(
            inner -> {
                setInner(inner);
                initializeChildrenFromInner();
                return afterCreatingAsync().then(this.refreshAsync());
            });
}
// Rebuilds the frontend wrapper map from the inner model's frontend IP configurations.
private void initializeFrontendsFromInner() {
    this.frontends = new TreeMap<>();
    List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
    if (frontendsInner != null) {
        for (FrontendIpConfigurationInner frontendInner : frontendsInner) {
            LoadBalancerFrontendImpl frontend = new LoadBalancerFrontendImpl(frontendInner, this);
            this.frontends.put(frontendInner.name(), frontend);
        }
    }
}
// Rebuilds the backend wrapper map from the inner model's backend address pools.
private void initializeBackendsFromInner() {
    this.backends = new TreeMap<>();
    List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
    if (backendsInner != null) {
        for (BackendAddressPoolInner backendInner : backendsInner) {
            LoadBalancerBackendImpl backend = new LoadBalancerBackendImpl(backendInner, this);
            this.backends.put(backendInner.name(), backend);
        }
    }
}
/**
 * Rebuilds the three probe wrapper maps (TCP/HTTP/HTTPS) from the inner model,
 * routing each probe by its protocol. Probes with an unrecognized protocol are
 * silently skipped, as before.
 */
private void initializeProbesFromInner() {
    this.httpProbes = new TreeMap<>();
    this.httpsProbes = new TreeMap<>();
    this.tcpProbes = new TreeMap<>();
    if (this.innerModel().probes() != null) {
        for (ProbeInner probeInner : this.innerModel().probes()) {
            LoadBalancerProbeImpl probe = new LoadBalancerProbeImpl(probeInner, this);
            // Compare constant-first so a probe whose protocol is null is
            // skipped instead of throwing a NullPointerException (the original
            // dereferenced probeInner.protocol() for the equals call).
            if (ProbeProtocol.TCP.equals(probeInner.protocol())) {
                this.tcpProbes.put(probeInner.name(), probe);
            } else if (ProbeProtocol.HTTP.equals(probeInner.protocol())) {
                this.httpProbes.put(probeInner.name(), probe);
            } else if (ProbeProtocol.HTTPS.equals(probeInner.protocol())) {
                this.httpsProbes.put(probeInner.name(), probe);
            }
        }
    }
}
private void initializeLoadBalancingRulesFromInner() {
this.loadBalancingRules = new TreeMap<>();
List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
if (rulesInner != null) {
for (LoadBalancingRuleInner ruleInner : rulesInner) {
LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
this.loadBalancingRules.put(ruleInner.name(), rule);
}
}
}
/** Rebuilds the inbound NAT pool wrapper map from the inner model. */
private void initializeInboundNatPoolsFromInner() {
    inboundNatPools = new TreeMap<>();
    List<InboundNatPool> inners = this.innerModel().inboundNatPools();
    if (inners == null) {
        return;
    }
    for (InboundNatPool inner : inners) {
        LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
        // keyed by the wrapper's name (matches the inner name it wraps)
        inboundNatPools.put(wrapper.name(), wrapper);
    }
}
/** Rebuilds the inbound NAT rule wrapper map from the inner model. */
private void initializeInboundNatRulesFromInner() {
    inboundNatRules = new TreeMap<>();
    List<InboundNatRuleInner> inners = this.innerModel().inboundNatRules();
    if (inners == null) {
        return;
    }
    for (InboundNatRuleInner inner : inners) {
        inboundNatRules.put(inner.name(), new LoadBalancerInboundNatRuleImpl(inner, this));
    }
}
/** Rebuilds the outbound rule wrapper map from the inner model. */
private void initializeOutboundRulesFromInner() {
    outboundRules = new TreeMap<>();
    List<OutboundRuleInner> inners = this.innerModel().outboundRules();
    if (inners == null) {
        return;
    }
    for (OutboundRuleInner inner : inners) {
        outboundRules.put(inner.name(), new LoadBalancerOutboundRuleImpl(inner, this));
    }
}
/**
 * Computes the resource id this load balancer will have once created.
 * Plain concatenation is clearer than an explicit StringBuilder for a fixed, small join
 * (the compiler produces equivalent code).
 */
String futureResourceId() {
    return super.resourceIdBase() + "/providers/Microsoft.Network/loadBalancers/" + this.name();
}
/** Registers an attached frontend child; null is tolerated as a no-op. */
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
    if (frontend == null) {
        return this;
    }
    frontends.put(frontend.name(), frontend);
    return this;
}
/** Registers an attached probe child in the map that matches its protocol; null is a no-op. */
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
    if (probe != null) {
        ProbeProtocol protocol = probe.protocol();
        if (protocol == ProbeProtocol.HTTP) {
            httpProbes.put(probe.name(), probe);
        } else if (protocol == ProbeProtocol.HTTPS) {
            httpsProbes.put(probe.name(), probe);
        } else if (protocol == ProbeProtocol.TCP) {
            tcpProbes.put(probe.name(), probe);
        }
    }
    return this;
}
/** Registers an attached load balancing rule child; null is tolerated as a no-op. */
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
    if (loadBalancingRule == null) {
        return this;
    }
    loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
    return this;
}
/** Registers an attached inbound NAT rule child; null is tolerated as a no-op. */
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
    if (inboundNatRule == null) {
        return this;
    }
    inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
    return this;
}
/** Registers an attached inbound NAT pool child; null is tolerated as a no-op. */
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
    if (inboundNatPool == null) {
        return this;
    }
    inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
    return this;
}
/** Registers an attached outbound rule child; null is tolerated as a no-op. */
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
    if (outboundRule == null) {
        return this;
    }
    outboundRules.put(outboundRule.name(), outboundRule);
    return this;
}
/** Registers an attached backend child; null is tolerated as a no-op. */
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
    if (backend == null) {
        return this;
    }
    backends.put(backend.name(), backend);
    return this;
}
/**
 * Associates a new public IP (named after the DNS leaf label) with the given frontend.
 * The PIP joins this load balancer's resource group, or the creatable group when one is pending.
 */
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
    PublicIpAddress.DefinitionStages.WithGroup pipDefinition =
        manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
    Creatable<PublicIpAddress> creatablePip = (super.creatableGroup == null)
        ? pipDefinition.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel)
        : pipDefinition.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
    return withNewPublicIPAddress(creatablePip, frontendName);
}
/**
 * Registers a creatable public IP as a creation dependency and records which frontend it
 * should be bound to once created (resolved later, before the create call).
 *
 * @param creatablePip the public IP definition to create alongside this load balancer
 * @param frontendName the frontend to bind the PIP to; null picks an existing association
 *     or a newly generated frontend
 * @throws IllegalArgumentException if this PIP definition is already bound to a
 *     differently named frontend
 */
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
    // A PIP definition may already be registered, bound to some frontend.
    String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
    if (frontendName == null) {
        if (existingPipFrontendName != null) {
            // Reuse the frontend already associated with this PIP definition.
            frontendName = existingPipFrontendName;
        } else {
            // No frontend specified and none associated yet: generate one.
            frontendName = ensureUniqueFrontend().name();
        }
    }
    if (existingPipFrontendName == null) {
        // First sighting: register the PIP as a dependency and remember its frontend.
        this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
    } else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
        String exceptionMessage =
            "This public IP address definition is already associated with a frontend under a different name.";
        throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
    }
    return this;
}
/** Binds an existing public IP to the named frontend, or to a newly generated one when name is null. */
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
    if (frontendName != null) {
        return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
    }
    return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
}
/**
 * Ensures the named backend exists and records the VM's primary NIC for association
 * with that backend after creation; a null backend name is a no-op.
 */
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
    if (backendName == null) {
        return this;
    }
    this.defineBackend(backendName).attach();
    String nicId = vm.primaryNetworkInterfaceId();
    if (nicId != null) {
        // backend names are stored lowercased in this bookkeeping map
        this.nicsInBackends.put(nicId, backendName.toLowerCase(Locale.ROOT));
    }
    return this;
}
/** Starts definition of a new TCP probe, or returns the existing one with this name. */
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
    LoadBalancerProbe existing = tcpProbes.get(name);
    if (existing != null) {
        return (LoadBalancerProbeImpl) existing;
    }
    return new LoadBalancerProbeImpl(new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP), this);
}
/** Starts definition of a new HTTP probe (default port 80), or returns the existing one. */
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
    LoadBalancerProbe existing = httpProbes.get(name);
    if (existing != null) {
        return (LoadBalancerProbeImpl) existing;
    }
    return new LoadBalancerProbeImpl(
        new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80), this);
}
/** Starts definition of a new HTTPS probe (default port 443), or returns the existing one. */
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
    LoadBalancerProbe existing = httpsProbes.get(name);
    if (existing != null) {
        return (LoadBalancerProbeImpl) existing;
    }
    return new LoadBalancerProbeImpl(
        new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443), this);
}
/** Starts definition of a new load balancing rule, or returns the existing one with this name. */
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
    LoadBalancingRule existing = loadBalancingRules.get(name);
    if (existing != null) {
        return (LoadBalancingRuleImpl) existing;
    }
    return new LoadBalancingRuleImpl(new LoadBalancingRuleInner().withName(name), this);
}
/** Starts definition of a new inbound NAT rule, or returns the existing one with this name. */
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
    LoadBalancerInboundNatRule existing = inboundNatRules.get(name);
    if (existing != null) {
        return (LoadBalancerInboundNatRuleImpl) existing;
    }
    return new LoadBalancerInboundNatRuleImpl(new InboundNatRuleInner().withName(name), this);
}
/** Starts definition of a new inbound NAT pool, or returns the existing one with this name. */
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
    LoadBalancerInboundNatPool existing = inboundNatPools.get(name);
    if (existing != null) {
        return (LoadBalancerInboundNatPoolImpl) existing;
    }
    return new LoadBalancerInboundNatPoolImpl(new InboundNatPool().withName(name), this);
}
/** Private frontends share the generic frontend definition flow. */
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
    return defineFrontend(name);
}
/** Public frontends share the generic frontend definition flow. */
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
    return defineFrontend(name);
}
/** Starts definition of a new frontend, or returns the existing one with this name. */
LoadBalancerFrontendImpl defineFrontend(String name) {
    LoadBalancerFrontend existing = frontends.get(name);
    return existing != null
        ? (LoadBalancerFrontendImpl) existing
        : new LoadBalancerFrontendImpl(new FrontendIpConfigurationInner().withName(name), this);
}
/** Starts definition of a new backend, or returns the existing one with this name. */
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
    LoadBalancerBackend existing = backends.get(name);
    return existing != null
        ? (LoadBalancerBackendImpl) existing
        : new LoadBalancerBackendImpl(new BackendAddressPoolInner().withName(name), this);
}
/** Sets the SKU on the inner model. */
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
    innerModel().withSku(skuType.sku());
    return this;
}
/**
 * Removes the probe with the given name from whichever protocol map holds it.
 * Uses {@code Map.remove}'s return value instead of the original containsKey+remove
 * pair, halving the lookups while preserving the original stop-at-first-hit chain
 * (HTTP, then HTTPS, then TCP).
 */
@Override
public LoadBalancerImpl withoutProbe(String name) {
    if (this.httpProbes.remove(name) == null && this.httpsProbes.remove(name) == null) {
        this.tcpProbes.remove(name);
    }
    return this;
}
/** @return the existing TCP probe to update, or null if no probe has this name */
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
    return (LoadBalancerProbeImpl) tcpProbes.get(name);
}
/** @return the existing backend to update, or null if no backend has this name */
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
    return (LoadBalancerBackendImpl) backends.get(name);
}
/** @return the existing frontend to update, or null; no public/private check is performed */
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
    return (LoadBalancerFrontendImpl) frontends.get(name);
}
/** @return the existing frontend to update, or null; no public/private check is performed */
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
    return (LoadBalancerFrontendImpl) frontends.get(name);
}
/** @return the existing inbound NAT rule to update, or null if none has this name */
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
    return (LoadBalancerInboundNatRuleImpl) inboundNatRules.get(name);
}
/** @return the existing inbound NAT pool to update, or null if none has this name */
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
    return (LoadBalancerInboundNatPoolImpl) inboundNatPools.get(name);
}
/** @return the existing HTTP probe to update, or null if none has this name */
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
    return (LoadBalancerProbeImpl) httpProbes.get(name);
}
/** @return the existing HTTPS probe to update, or null if none has this name */
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
    return (LoadBalancerProbeImpl) httpsProbes.get(name);
}
/** @return the existing load balancing rule to update, or null if none has this name */
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
    return (LoadBalancingRuleImpl) loadBalancingRules.get(name);
}
/** Removes the load balancing rule with the given name, if present. */
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
    loadBalancingRules.remove(name);
    return this;
}
/** Removes the inbound NAT rule with the given name, if present. */
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
    inboundNatRules.remove(name);
    return this;
}
/** Removes the backend with the given name, if present. */
@Override
public LoadBalancerImpl withoutBackend(String name) {
    backends.remove(name);
    return this;
}
/**
 * Removes the inbound NAT pool with the given name, if present.
 * Return type narrowed (covariantly) from {@code Update} to {@code LoadBalancerImpl}
 * for consistency with the sibling {@code without*} methods; callers compiled against
 * {@code Update} are unaffected since {@code LoadBalancerImpl} implements it.
 */
@Override
public LoadBalancerImpl withoutInboundNatPool(String name) {
    this.inboundNatPools.remove(name);
    return this;
}
/** Removes the frontend with the given name, if present. */
@Override
public LoadBalancerImpl withoutFrontend(String name) {
    frontends.remove(name);
    return this;
}
/** @return read-only view of the backends, keyed by name */
@Override
public Map<String, LoadBalancerBackend> backends() {
    return Collections.unmodifiableMap(backends);
}
/** @return read-only view of the inbound NAT pools, keyed by name */
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
    return Collections.unmodifiableMap(inboundNatPools);
}
/** @return the SKU type derived from the inner model's SKU */
@Override
public LoadBalancerSkuType sku() {
    return LoadBalancerSkuType.fromSku(innerModel().sku());
}
/** @return read-only view of the outbound rules, keyed by name */
@Override
public Map<String, LoadBalancerOutboundRule> outboundRules() {
    return Collections.unmodifiableMap(outboundRules);
}
/** @return read-only view of the TCP probes, keyed by name */
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
    return Collections.unmodifiableMap(tcpProbes);
}
/** @return read-only view of all frontends (public and private), keyed by name */
@Override
public Map<String, LoadBalancerFrontend> frontends() {
    return Collections.unmodifiableMap(frontends);
}
/** @return read-only map of only the private frontends, keyed by name */
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
    Map<String, LoadBalancerPrivateFrontend> result = new HashMap<>();
    this.frontends().values().stream()
        .filter(frontend -> !frontend.isPublic())
        .forEach(frontend -> result.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend));
    return Collections.unmodifiableMap(result);
}
/** @return read-only map of only the public frontends, keyed by name */
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
    Map<String, LoadBalancerPublicFrontend> result = new HashMap<>();
    this.frontends().values().stream()
        .filter(LoadBalancerFrontend::isPublic)
        .forEach(frontend -> result.put(frontend.name(), (LoadBalancerPublicFrontend) frontend));
    return Collections.unmodifiableMap(result);
}
/** @return read-only view of the inbound NAT rules, keyed by name */
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
    return Collections.unmodifiableMap(inboundNatRules);
}
/** @return read-only view of the HTTP probes, keyed by name */
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
    return Collections.unmodifiableMap(httpProbes);
}
/** @return read-only view of the HTTPS probes, keyed by name */
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
    return Collections.unmodifiableMap(httpsProbes);
}
/** @return read-only view of the load balancing rules, keyed by name */
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
    return Collections.unmodifiableMap(loadBalancingRules);
}
/** @return read-only list of the public IP resource ids referenced by public frontends */
@Override
public List<String> publicIpAddressIds() {
    List<String> ids = new ArrayList<>();
    for (LoadBalancerFrontend frontend : frontends().values()) {
        if (frontend.isPublic()) {
            ids.add(((LoadBalancerPublicFrontend) frontend).publicIpAddressId());
        }
    }
    return Collections.unmodifiableList(ids);
}
/** Finds the public frontend referencing the given PIP id (case-insensitive), or null. */
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
    if (pipId != null) {
        for (LoadBalancerPublicFrontend candidate : this.publicFrontends().values()) {
            String candidatePipId = candidate.publicIpAddressId();
            if (candidatePipId != null && pipId.equalsIgnoreCase(candidatePipId)) {
                return candidate;
            }
        }
    }
    return null;
}
/** Convenience overload: resolves the PIP's id and delegates; null input yields null. */
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
    if (publicIPAddress == null) {
        return null;
    }
    return this.findFrontendByPublicIpAddress(publicIPAddress.id());
}
/** Removes the outbound rule with the given name, if present. */
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
    outboundRules.remove(name);
    return this;
}
/** Starts definition of a new outbound rule, or returns the existing one with this name. */
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
    LoadBalancerOutboundRule existing = outboundRules.get(name);
    if (existing != null) {
        return (LoadBalancerOutboundRuleImpl) existing;
    }
    return new LoadBalancerOutboundRuleImpl(new OutboundRuleInner().withName(name), this);
}
/** @return the existing outbound rule to update, or null if none has this name */
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
    return (LoadBalancerOutboundRuleImpl) outboundRules.get(name);
}
} |
For this part, I referred to the load balancer inbound NAT rule, which also has 'clear deleted frontend references' logic, although a frontend is mandatory for an inbound NAT rule. So, as I understand it, this clears stale references before the outbound rule is created; when the outbound rule is created, the frontend and backend will be set. Attached is the code for the inbound NAT rule part. ``` // Reset and update inbound NAT rules List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values()); if (null == innerNatRules) { innerNatRules = new ArrayList<>(); } this.innerModel().withInboundNatRules(innerNatRules); for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) { // Clear deleted frontend references SubResource ref = natRule.innerModel().frontendIpConfiguration(); if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) { natRule.innerModel().withFrontendIpConfiguration(null); } } ``` | protected void beforeCreating() {
// Resolve public IPs that were created as dependencies: bind each to its
// designated frontend, then clear the bookkeeping map.
if (this.creatablePIPKeys != null) {
    for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
        PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
        if (pip != null) {
            withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
        }
    }
    this.creatablePIPKeys.clear();
}
// Reset and update probes (HTTP, HTTPS and TCP flattened into one inner list)
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
    innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
// Reset and update backends
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
    innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
// Reset and update frontends
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
    innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
// Reset and update inbound NAT rules
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
    innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
    // Clear deleted frontend references
    SubResource ref = natRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natRule.innerModel().withFrontendIpConfiguration(null);
    }
}
// Reset and update inbound NAT pools
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
    innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
    // Clear deleted frontend references
    SubResource ref = natPool.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natPool.innerModel().withFrontendIpConfiguration(null);
    }
}
// Reset and update outbound rules
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
    innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
    // Keep only frontend references that still exist on this load balancer
    List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
    if (refs != null && !refs.isEmpty()) {
        List<SubResource> existingFrontendIpConfigurations =
            refs.stream()
                .filter(ref ->
                    this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
                )
                .collect(Collectors.toList());
        // An empty surviving list is normalized to null before the create call
        existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
        outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
    }
    // Clear deleted backend references
    SubResource ref = outboundRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        outboundRule.innerModel().withBackendAddressPool(null);
    }
}
// Reset and update load balancing rules
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
    innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
    SubResource ref;
    // Clear deleted frontend references
    ref = lbRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withFrontendIpConfiguration(null);
    }
    // Clear deleted backend references
    ref = lbRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withBackendAddressPool(null);
    }
    // Clear deleted probe references (a probe can live in any of the three maps)
    ref = lbRule.innerModel().probe();
    if (ref != null
        && !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withProbe(null);
    }
}
} | } | protected void beforeCreating() {
// Resolve public IPs that were created as dependencies: bind each to its
// designated frontend, then clear the bookkeeping map.
if (this.creatablePIPKeys != null) {
    for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
        PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
        if (pip != null) {
            withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
        }
    }
    this.creatablePIPKeys.clear();
}
// Reset and update probes (HTTP, HTTPS and TCP flattened into one inner list)
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
    innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
// Reset and update backends
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
    innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
// Reset and update frontends
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
    innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
// Reset and update inbound NAT rules
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
    innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
    // Clear deleted frontend references
    SubResource ref = natRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natRule.innerModel().withFrontendIpConfiguration(null);
    }
}
// Reset and update inbound NAT pools
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
    innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
    // Clear deleted frontend references
    SubResource ref = natPool.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natPool.innerModel().withFrontendIpConfiguration(null);
    }
}
// Reset and update outbound rules
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
    innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
    // Keep only frontend references that still exist on this load balancer
    List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
    if (refs != null && !refs.isEmpty()) {
        List<SubResource> existingFrontendIpConfigurations =
            refs.stream()
                .filter(ref ->
                    this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
                )
                .collect(Collectors.toList());
        // An empty surviving list is normalized to null before the create call
        existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
        outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
    }
    // Clear deleted backend references
    SubResource ref = outboundRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        outboundRule.innerModel().withBackendAddressPool(null);
    }
}
// Reset and update load balancing rules
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
    innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
    SubResource ref;
    // Clear deleted frontend references
    ref = lbRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withFrontendIpConfiguration(null);
    }
    // Clear deleted backend references
    ref = lbRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withBackendAddressPool(null);
    }
    // Clear deleted probe references (a probe can live in any of the three maps)
    ref = lbRule.innerModel().probe();
    if (ref != null
        && !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withProbe(null);
    }
}
} | class LoadBalancerImpl
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
// Logger scoped to the concrete class for error reporting.
private final ClientLogger logger = new ClientLogger(getClass());
// Primary NIC id -> backend name (stored lowercased) to associate after creation.
private final Map<String, String> nicsInBackends = new HashMap<>();
// Creatable public-IP dependency key -> frontend name it should be bound to.
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
// Child wrapper maps, rebuilt from the inner model by initializeChildrenFromInner().
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
/** Wraps the given inner model; delegates entirely to the groupable parent resource constructor. */
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
    super(name, innerModel, networkManager);
}
/** Refreshes the resource and rebuilds the child wrapper maps from the refreshed inner model. */
@Override
public Mono<LoadBalancer> refreshAsync() {
    return super.refreshAsync()
        .map(refreshed -> {
            LoadBalancerImpl self = (LoadBalancerImpl) refreshed;
            self.initializeChildrenFromInner();
            return self;
        });
}
/** Fetches the inner model from the service by resource group and name. */
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
    return manager()
        .serviceClient()
        .getLoadBalancers()
        .getByResourceGroupAsync(resourceGroupName(), name());
}
/** Pushes only the inner model's current tags to the service. */
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
    return manager()
        .serviceClient()
        .getLoadBalancers()
        .updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
/** Rebuilds every child wrapper map from the current inner model state. */
@Override
protected void initializeChildrenFromInner() {
    initializeFrontendsFromInner();
    initializeProbesFromInner();
    initializeBackendsFromInner();
    initializeLoadBalancingRulesFromInner();
    initializeInboundNatRulesFromInner();
    initializeInboundNatPoolsFromInner();
    initializeOutboundRulesFromInner();
}
/** Creates and attaches a backend with a randomly generated name. */
protected LoadBalancerBackendImpl ensureUniqueBackend() {
    String generatedName = manager().resourceManager().internalContext().randomResourceName("backend", 20);
    LoadBalancerBackendImpl newBackend = defineBackend(generatedName);
    newBackend.attach();
    return newBackend;
}
/** Builds a SubResource id pointing at the named frontend (generated when name is null). */
protected SubResource ensureFrontendRef(String name) {
    LoadBalancerFrontendImpl frontend;
    if (name != null) {
        frontend = this.defineFrontend(name);
        frontend.attach();
    } else {
        frontend = this.ensureUniqueFrontend();
    }
    return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
/** Creates and attaches a frontend with a randomly generated name. */
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
    String generatedName = manager().resourceManager().internalContext().randomResourceName("frontend", 20);
    LoadBalancerFrontendImpl newFrontend = defineFrontend(generatedName);
    newFrontend.attach();
    return newFrontend;
}
/** Finds the private frontend on the given network/subnet (case-insensitive), or null. */
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
    if (networkId == null || subnetName == null) {
        return null;
    }
    for (LoadBalancerPrivateFrontend candidate : this.privateFrontends().values()) {
        // equalsIgnoreCase(null) is false, so frontends lacking a network/subnet are skipped
        if (networkId.equalsIgnoreCase(candidate.networkId())
            && subnetName.equalsIgnoreCase(candidate.subnetName())) {
            return candidate;
        }
    }
    return null;
}
/**
 * Returns the private frontend on the given network/subnet, creating and attaching a
 * dynamically-addressed one if none exists; null inputs yield null.
 * Fix: the null guard now runs before the lookup — the original searched first and
 * checked the arguments afterwards, doing a pointless scan for null inputs.
 */
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl created =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    created.attach();
    return created;
}
/**
 * Returns the public frontend referencing the given PIP id, creating and attaching
 * one if none exists; a null id yields null.
 * Fix: the null guard now runs before the lookup — the original searched first and
 * checked the argument afterwards, doing a pointless scan for a null id.
 */
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl created = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    created.attach();
    return created;
}
/**
 * Post-create step: associates each recorded NIC's primary IP configuration with its
 * designated backend pool. Per-NIC failures are collected and surfaced together at the
 * end instead of failing fast.
 */
@Override
protected Mono<Void> afterCreatingAsync() {
    if (this.nicsInBackends != null) {
        List<Throwable> nicExceptions = new ArrayList<>();
        return Flux
            .fromIterable(this.nicsInBackends.entrySet())
            .flatMap(
                nicInBackend -> {
                    String nicId = nicInBackend.getKey();
                    String backendName = nicInBackend.getValue();
                    return this
                        .manager()
                        .networkInterfaces()
                        .getByIdAsync(nicId)
                        .flatMap(
                            nic -> {
                                // point the NIC's primary IP config at this LB's backend
                                NicIpConfiguration nicIP = nic.primaryIPConfiguration();
                                return nic
                                    .update()
                                    .updateIPConfiguration(nicIP.name())
                                    .withExistingLoadBalancerBackend(this, backendName)
                                    .parent()
                                    .applyAsync();
                            });
                })
            .onErrorResume(
                t -> {
                    // collect the failure; reported as one aggregate error below
                    nicExceptions.add(t);
                    return Mono.empty();
                })
            .then(
                Mono
                    .defer(
                        () -> {
                            if (!nicExceptions.isEmpty()) {
                                return Mono.error(Exceptions.multiple(nicExceptions));
                            } else {
                                // all succeeded: clear bookkeeping so it is not re-applied
                                this.nicsInBackends.clear();
                                return Mono.empty();
                            }
                        }));
    }
    return Mono.empty();
}
/** Issues the create-or-update call for this load balancer's inner model. */
@Override
protected Mono<LoadBalancerInner> createInner() {
    return manager()
        .serviceClient()
        .getLoadBalancers()
        .createOrUpdateAsync(resourceGroupName(), name(), innerModel());
}
/**
 * Creates the resource: normalizes the inner model first (resolving PIP dependencies and
 * pruning stale child references), issues the create, rebuilds child wrappers from the
 * response, runs the post-create NIC association, then refreshes.
 */
@Override
public Mono<LoadBalancer> createResourceAsync() {
    beforeCreating();
    return createInner()
        .flatMap(
            inner -> {
                setInner(inner);
                initializeChildrenFromInner();
                return afterCreatingAsync().then(this.refreshAsync());
            });
}
/** Rebuilds the frontend wrapper map from the inner model's frontend IP configurations. */
private void initializeFrontendsFromInner() {
    frontends = new TreeMap<>();
    List<FrontendIpConfigurationInner> inners = this.innerModel().frontendIpConfigurations();
    if (inners == null) {
        return;
    }
    for (FrontendIpConfigurationInner inner : inners) {
        frontends.put(inner.name(), new LoadBalancerFrontendImpl(inner, this));
    }
}
/** Rebuilds the backend wrapper map from the inner model's backend address pools. */
private void initializeBackendsFromInner() {
    backends = new TreeMap<>();
    List<BackendAddressPoolInner> inners = this.innerModel().backendAddressPools();
    if (inners == null) {
        return;
    }
    for (BackendAddressPoolInner inner : inners) {
        backends.put(inner.name(), new LoadBalancerBackendImpl(inner, this));
    }
}
/** Rebuilds the TCP/HTTP/HTTPS probe wrapper maps, partitioned by the inner probe's protocol. */
private void initializeProbesFromInner() {
    tcpProbes = new TreeMap<>();
    httpProbes = new TreeMap<>();
    httpsProbes = new TreeMap<>();
    if (this.innerModel().probes() == null) {
        return;
    }
    for (ProbeInner inner : this.innerModel().probes()) {
        LoadBalancerProbeImpl wrapper = new LoadBalancerProbeImpl(inner, this);
        ProbeProtocol protocol = inner.protocol();
        if (protocol.equals(ProbeProtocol.TCP)) {
            tcpProbes.put(inner.name(), wrapper);
        } else if (protocol.equals(ProbeProtocol.HTTP)) {
            httpProbes.put(inner.name(), wrapper);
        } else if (protocol.equals(ProbeProtocol.HTTPS)) {
            httpsProbes.put(inner.name(), wrapper);
        }
        // probes with any other protocol value are not tracked in a wrapper map
    }
}
/** Rebuilds the load balancing rule wrapper map from the inner model. */
private void initializeLoadBalancingRulesFromInner() {
    loadBalancingRules = new TreeMap<>();
    List<LoadBalancingRuleInner> inners = this.innerModel().loadBalancingRules();
    if (inners == null) {
        return;
    }
    for (LoadBalancingRuleInner inner : inners) {
        loadBalancingRules.put(inner.name(), new LoadBalancingRuleImpl(inner, this));
    }
}
/** Rebuilds the inbound NAT pool wrapper map from the inner model. */
private void initializeInboundNatPoolsFromInner() {
    inboundNatPools = new TreeMap<>();
    List<InboundNatPool> inners = this.innerModel().inboundNatPools();
    if (inners == null) {
        return;
    }
    for (InboundNatPool inner : inners) {
        LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
        // keyed by the wrapper's name (matches the inner name it wraps)
        inboundNatPools.put(wrapper.name(), wrapper);
    }
}
/** Rebuilds the inbound NAT rule wrapper map from the inner model. */
private void initializeInboundNatRulesFromInner() {
    inboundNatRules = new TreeMap<>();
    List<InboundNatRuleInner> inners = this.innerModel().inboundNatRules();
    if (inners == null) {
        return;
    }
    for (InboundNatRuleInner inner : inners) {
        inboundNatRules.put(inner.name(), new LoadBalancerInboundNatRuleImpl(inner, this));
    }
}
private void initializeOutboundRulesFromInner() {
this.outboundRules = new TreeMap<>();
List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
if (rulesInner != null) {
for (OutboundRuleInner ruleInner : rulesInner) {
LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
this.outboundRules.put(ruleInner.name(), rule);
}
}
}
String futureResourceId() {
return new StringBuilder()
.append(super.resourceIdBase())
.append("/providers/Microsoft.Network/loadBalancers/")
.append(this.name())
.toString();
}
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
if (frontend != null) {
this.frontends.put(frontend.name(), frontend);
}
return this;
}
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
if (probe == null) {
return this;
} else if (probe.protocol() == ProbeProtocol.HTTP) {
httpProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.HTTPS) {
httpsProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.TCP) {
tcpProbes.put(probe.name(), probe);
}
return this;
}
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
if (loadBalancingRule != null) {
this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
}
return this;
}
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
if (inboundNatRule != null) {
this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
}
return this;
}
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
if (inboundNatPool != null) {
this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
}
return this;
}
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
if (outboundRule != null) {
this.outboundRules.put(outboundRule.name(), outboundRule);
}
return this;
}
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
if (backend != null) {
this.backends.put(backend.name(), backend);
}
return this;
}
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
Creatable<PublicIpAddress> creatablePip;
if (super.creatableGroup == null) {
creatablePip =
precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
} else {
creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
}
return withNewPublicIPAddress(creatablePip, frontendName);
}
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
if (frontendName == null) {
if (existingPipFrontendName != null) {
frontendName = existingPipFrontendName;
} else {
frontendName = ensureUniqueFrontend().name();
}
}
if (existingPipFrontendName == null) {
this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
} else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
String exceptionMessage =
"This public IP address definition is already associated with a frontend under a different name.";
throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
}
return this;
}
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
if (frontendName == null) {
return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
} else {
return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
}
}
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
if (backendName != null) {
this.defineBackend(backendName).attach();
if (vm.primaryNetworkInterfaceId() != null) {
this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
}
}
return this;
}
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
LoadBalancerProbe probe = this.tcpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
LoadBalancerProbe probe = this.httpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
LoadBalancerProbe probe = this.httpsProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
if (lbRule == null) {
LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
return new LoadBalancingRuleImpl(inner, this);
} else {
return (LoadBalancingRuleImpl) lbRule;
}
}
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
if (natRule == null) {
InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
return new LoadBalancerInboundNatRuleImpl(inner, this);
} else {
return (LoadBalancerInboundNatRuleImpl) natRule;
}
}
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
LoadBalancerInboundNatPool natPool = this.inboundNatPools.get(name);
if (natPool == null) {
InboundNatPool inner = new InboundNatPool().withName(name);
return new LoadBalancerInboundNatPoolImpl(inner, this);
} else {
return (LoadBalancerInboundNatPoolImpl) natPool;
}
}
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
return defineFrontend(name);
}
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
return defineFrontend(name);
}
LoadBalancerFrontendImpl defineFrontend(String name) {
LoadBalancerFrontend frontend = this.frontends.get(name);
if (frontend == null) {
FrontendIpConfigurationInner inner = new FrontendIpConfigurationInner().withName(name);
return new LoadBalancerFrontendImpl(inner, this);
} else {
return (LoadBalancerFrontendImpl) frontend;
}
}
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
LoadBalancerBackend backend = this.backends.get(name);
if (backend == null) {
BackendAddressPoolInner inner = new BackendAddressPoolInner().withName(name);
return new LoadBalancerBackendImpl(inner, this);
} else {
return (LoadBalancerBackendImpl) backend;
}
}
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
this.innerModel().withSku(skuType.sku());
return this;
}
@Override
public LoadBalancerImpl withoutProbe(String name) {
if (this.httpProbes.containsKey(name)) {
this.httpProbes.remove(name);
} else if (this.httpsProbes.containsKey(name)) {
this.httpsProbes.remove(name);
} else if (this.tcpProbes.containsKey(name)) {
this.tcpProbes.remove(name);
}
return this;
}
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
return (LoadBalancerBackendImpl) this.backends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
this.loadBalancingRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
this.inboundNatRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutBackend(String name) {
this.backends.remove(name);
return this;
}
@Override
public Update withoutInboundNatPool(String name) {
this.inboundNatPools.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutFrontend(String name) {
this.frontends.remove(name);
return this;
}
@Override
public Map<String, LoadBalancerBackend> backends() {
return Collections.unmodifiableMap(this.backends);
}
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
return Collections.unmodifiableMap(this.inboundNatPools);
}
@Override
public LoadBalancerSkuType sku() {
return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
return Collections.unmodifiableMap(this.tcpProbes);
}
@Override
public Map<String, LoadBalancerFrontend> frontends() {
return Collections.unmodifiableMap(this.frontends);
}
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
Map<String, LoadBalancerPrivateFrontend> privateFrontends = new HashMap<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (!frontend.isPublic()) {
privateFrontends.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend);
}
}
return Collections.unmodifiableMap(privateFrontends);
}
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
Map<String, LoadBalancerPublicFrontend> publicFrontends = new HashMap<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (frontend.isPublic()) {
publicFrontends.put(frontend.name(), (LoadBalancerPublicFrontend) frontend);
}
}
return Collections.unmodifiableMap(publicFrontends);
}
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
return Collections.unmodifiableMap(this.inboundNatRules);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
return Collections.unmodifiableMap(this.httpProbes);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
return Collections.unmodifiableMap(this.httpsProbes);
}
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
return Collections.unmodifiableMap(this.loadBalancingRules);
}
@Override
public List<String> publicIpAddressIds() {
List<String> publicIPAddressIds = new ArrayList<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (frontend.isPublic()) {
String pipId = ((LoadBalancerPublicFrontend) frontend).publicIpAddressId();
publicIPAddressIds.add(pipId);
}
}
return Collections.unmodifiableList(publicIPAddressIds);
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
if (pipId == null) {
return null;
}
for (LoadBalancerPublicFrontend frontend : this.publicFrontends().values()) {
if (frontend.publicIpAddressId() == null) {
continue;
} else if (pipId.equalsIgnoreCase(frontend.publicIpAddressId())) {
return frontend;
}
}
return null;
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
return (publicIPAddress != null) ? this.findFrontendByPublicIpAddress(publicIPAddress.id()) : null;
}
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
this.outboundRules.remove(name);
return this;
}
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
LoadBalancerOutboundRule outboundRule = this.outboundRules.get(name);
if (outboundRule == null) {
OutboundRuleInner inner = new OutboundRuleInner().withName(name);
return new LoadBalancerOutboundRuleImpl(inner, this);
} else {
return (LoadBalancerOutboundRuleImpl) outboundRule;
}
}
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
} | class LoadBalancerImpl
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private final Map<String, String> nicsInBackends = new HashMap<>();
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
return super
.refreshAsync()
.map(
loadBalancer -> {
LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
impl.initializeChildrenFromInner();
return impl;
});
}
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
@Override
protected void initializeChildrenFromInner() {
initializeFrontendsFromInner();
initializeProbesFromInner();
initializeBackendsFromInner();
initializeLoadBalancingRulesFromInner();
initializeInboundNatRulesFromInner();
initializeInboundNatPoolsFromInner();
initializeOutboundRulesFromInner();
}
protected LoadBalancerBackendImpl ensureUniqueBackend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
LoadBalancerBackendImpl backend = this.defineBackend(name);
backend.attach();
return backend;
}
protected SubResource ensureFrontendRef(String name) {
LoadBalancerFrontendImpl frontend;
if (name == null) {
frontend = this.ensureUniqueFrontend();
} else {
frontend = this.defineFrontend(name);
frontend.attach();
}
return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
frontend.attach();
return frontend;
}
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
if (null == networkId || null == subnetName) {
return null;
} else {
for (LoadBalancerPrivateFrontend frontend : this.privateFrontends().values()) {
if (frontend.networkId() == null || frontend.subnetName() == null) {
continue;
} else if (networkId.equalsIgnoreCase(frontend.networkId())
&& subnetName.equalsIgnoreCase(frontend.subnetName())) {
return frontend;
}
}
return null;
}
}
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
if (networkId == null || subnetName == null) {
return null;
} else if (frontend != null) {
return frontend;
} else {
LoadBalancerFrontendImpl fe =
this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
fe.attach();
return fe;
}
}
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
if (pipId == null) {
return null;
} else if (frontend != null) {
return frontend;
} else {
LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
fe.attach();
return fe;
}
}
@Override
protected Mono<Void> afterCreatingAsync() {
if (this.nicsInBackends != null) {
List<Throwable> nicExceptions = new ArrayList<>();
return Flux
.fromIterable(this.nicsInBackends.entrySet())
.flatMap(
nicInBackend -> {
String nicId = nicInBackend.getKey();
String backendName = nicInBackend.getValue();
return this
.manager()
.networkInterfaces()
.getByIdAsync(nicId)
.flatMap(
nic -> {
NicIpConfiguration nicIP = nic.primaryIPConfiguration();
return nic
.update()
.updateIPConfiguration(nicIP.name())
.withExistingLoadBalancerBackend(this, backendName)
.parent()
.applyAsync();
});
})
.onErrorResume(
t -> {
nicExceptions.add(t);
return Mono.empty();
})
.then(
Mono
.defer(
() -> {
if (!nicExceptions.isEmpty()) {
return Mono.error(Exceptions.multiple(nicExceptions));
} else {
this.nicsInBackends.clear();
return Mono.empty();
}
}));
}
return Mono.empty();
}
@Override
protected Mono<LoadBalancerInner> createInner() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
@Override
public Mono<LoadBalancer> createResourceAsync() {
beforeCreating();
return createInner()
.flatMap(
inner -> {
setInner(inner);
initializeChildrenFromInner();
return afterCreatingAsync().then(this.refreshAsync());
});
}
private void initializeFrontendsFromInner() {
this.frontends = new TreeMap<>();
List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
if (frontendsInner != null) {
for (FrontendIpConfigurationInner frontendInner : frontendsInner) {
LoadBalancerFrontendImpl frontend = new LoadBalancerFrontendImpl(frontendInner, this);
this.frontends.put(frontendInner.name(), frontend);
}
}
}
private void initializeBackendsFromInner() {
this.backends = new TreeMap<>();
List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
if (backendsInner != null) {
for (BackendAddressPoolInner backendInner : backendsInner) {
LoadBalancerBackendImpl backend = new LoadBalancerBackendImpl(backendInner, this);
this.backends.put(backendInner.name(), backend);
}
}
}
private void initializeProbesFromInner() {
this.httpProbes = new TreeMap<>();
this.httpsProbes = new TreeMap<>();
this.tcpProbes = new TreeMap<>();
if (this.innerModel().probes() != null) {
for (ProbeInner probeInner : this.innerModel().probes()) {
LoadBalancerProbeImpl probe = new LoadBalancerProbeImpl(probeInner, this);
if (probeInner.protocol().equals(ProbeProtocol.TCP)) {
this.tcpProbes.put(probeInner.name(), probe);
} else if (probeInner.protocol().equals(ProbeProtocol.HTTP)) {
this.httpProbes.put(probeInner.name(), probe);
} else if (probeInner.protocol().equals(ProbeProtocol.HTTPS)) {
this.httpsProbes.put(probeInner.name(), probe);
}
}
}
}
private void initializeLoadBalancingRulesFromInner() {
this.loadBalancingRules = new TreeMap<>();
List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
if (rulesInner != null) {
for (LoadBalancingRuleInner ruleInner : rulesInner) {
LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
this.loadBalancingRules.put(ruleInner.name(), rule);
}
}
}
private void initializeInboundNatPoolsFromInner() {
this.inboundNatPools = new TreeMap<>();
List<InboundNatPool> inners = this.innerModel().inboundNatPools();
if (inners != null) {
for (InboundNatPool inner : inners) {
LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
this.inboundNatPools.put(wrapper.name(), wrapper);
}
}
}
private void initializeInboundNatRulesFromInner() {
this.inboundNatRules = new TreeMap<>();
List<InboundNatRuleInner> rulesInner = this.innerModel().inboundNatRules();
if (rulesInner != null) {
for (InboundNatRuleInner ruleInner : rulesInner) {
LoadBalancerInboundNatRuleImpl rule = new LoadBalancerInboundNatRuleImpl(ruleInner, this);
this.inboundNatRules.put(ruleInner.name(), rule);
}
}
}
private void initializeOutboundRulesFromInner() {
this.outboundRules = new TreeMap<>();
List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
if (rulesInner != null) {
for (OutboundRuleInner ruleInner : rulesInner) {
LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
this.outboundRules.put(ruleInner.name(), rule);
}
}
}
String futureResourceId() {
return new StringBuilder()
.append(super.resourceIdBase())
.append("/providers/Microsoft.Network/loadBalancers/")
.append(this.name())
.toString();
}
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
if (frontend != null) {
this.frontends.put(frontend.name(), frontend);
}
return this;
}
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
if (probe == null) {
return this;
} else if (probe.protocol() == ProbeProtocol.HTTP) {
httpProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.HTTPS) {
httpsProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.TCP) {
tcpProbes.put(probe.name(), probe);
}
return this;
}
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
if (loadBalancingRule != null) {
this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
}
return this;
}
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
if (inboundNatRule != null) {
this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
}
return this;
}
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
if (inboundNatPool != null) {
this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
}
return this;
}
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
if (outboundRule != null) {
this.outboundRules.put(outboundRule.name(), outboundRule);
}
return this;
}
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
if (backend != null) {
this.backends.put(backend.name(), backend);
}
return this;
}
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
Creatable<PublicIpAddress> creatablePip;
if (super.creatableGroup == null) {
creatablePip =
precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
} else {
creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
}
return withNewPublicIPAddress(creatablePip, frontendName);
}
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
if (frontendName == null) {
if (existingPipFrontendName != null) {
frontendName = existingPipFrontendName;
} else {
frontendName = ensureUniqueFrontend().name();
}
}
if (existingPipFrontendName == null) {
this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
} else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
String exceptionMessage =
"This public IP address definition is already associated with a frontend under a different name.";
throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
}
return this;
}
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
if (frontendName == null) {
return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
} else {
return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
}
}
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
if (backendName != null) {
this.defineBackend(backendName).attach();
if (vm.primaryNetworkInterfaceId() != null) {
this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
}
}
return this;
}
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
LoadBalancerProbe probe = this.tcpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
LoadBalancerProbe probe = this.httpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
LoadBalancerProbe probe = this.httpsProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
if (lbRule == null) {
LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
return new LoadBalancingRuleImpl(inner, this);
} else {
return (LoadBalancingRuleImpl) lbRule;
}
}
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
if (natRule == null) {
InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
return new LoadBalancerInboundNatRuleImpl(inner, this);
} else {
return (LoadBalancerInboundNatRuleImpl) natRule;
}
}
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
LoadBalancerInboundNatPool natPool = this.inboundNatPools.get(name);
if (natPool == null) {
InboundNatPool inner = new InboundNatPool().withName(name);
return new LoadBalancerInboundNatPoolImpl(inner, this);
} else {
return (LoadBalancerInboundNatPoolImpl) natPool;
}
}
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
return defineFrontend(name);
}
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
return defineFrontend(name);
}
LoadBalancerFrontendImpl defineFrontend(String name) {
LoadBalancerFrontend frontend = this.frontends.get(name);
if (frontend == null) {
FrontendIpConfigurationInner inner = new FrontendIpConfigurationInner().withName(name);
return new LoadBalancerFrontendImpl(inner, this);
} else {
return (LoadBalancerFrontendImpl) frontend;
}
}
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
LoadBalancerBackend backend = this.backends.get(name);
if (backend == null) {
BackendAddressPoolInner inner = new BackendAddressPoolInner().withName(name);
return new LoadBalancerBackendImpl(inner, this);
} else {
return (LoadBalancerBackendImpl) backend;
}
}
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
this.innerModel().withSku(skuType.sku());
return this;
}
@Override
public LoadBalancerImpl withoutProbe(String name) {
if (this.httpProbes.containsKey(name)) {
this.httpProbes.remove(name);
} else if (this.httpsProbes.containsKey(name)) {
this.httpsProbes.remove(name);
} else if (this.tcpProbes.containsKey(name)) {
this.tcpProbes.remove(name);
}
return this;
}
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
return (LoadBalancerBackendImpl) this.backends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
this.loadBalancingRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
this.inboundNatRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutBackend(String name) {
this.backends.remove(name);
return this;
}
@Override
public Update withoutInboundNatPool(String name) {
this.inboundNatPools.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutFrontend(String name) {
this.frontends.remove(name);
return this;
}
@Override
public Map<String, LoadBalancerBackend> backends() {
return Collections.unmodifiableMap(this.backends);
}
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
return Collections.unmodifiableMap(this.inboundNatPools);
}
@Override
public LoadBalancerSkuType sku() {
return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}
@Override
public Map<String, LoadBalancerOutboundRule> outboundRules() {
return Collections.unmodifiableMap(this.outboundRules);
}
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
return Collections.unmodifiableMap(this.tcpProbes);
}
@Override
public Map<String, LoadBalancerFrontend> frontends() {
return Collections.unmodifiableMap(this.frontends);
}
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
Map<String, LoadBalancerPrivateFrontend> privateFrontends = new HashMap<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (!frontend.isPublic()) {
privateFrontends.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend);
}
}
return Collections.unmodifiableMap(privateFrontends);
}
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
Map<String, LoadBalancerPublicFrontend> publicFrontends = new HashMap<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (frontend.isPublic()) {
publicFrontends.put(frontend.name(), (LoadBalancerPublicFrontend) frontend);
}
}
return Collections.unmodifiableMap(publicFrontends);
}
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
return Collections.unmodifiableMap(this.inboundNatRules);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
return Collections.unmodifiableMap(this.httpProbes);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
return Collections.unmodifiableMap(this.httpsProbes);
}
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
return Collections.unmodifiableMap(this.loadBalancingRules);
}
@Override
public List<String> publicIpAddressIds() {
    // Resource ids of the public IPs referenced by the public frontends
    // (entries may be null when a frontend has no IP assigned yet).
    List<String> ids = this.frontends().values().stream()
        .filter(LoadBalancerFrontend::isPublic)
        .map(frontend -> ((LoadBalancerPublicFrontend) frontend).publicIpAddressId())
        .collect(Collectors.toList());
    return Collections.unmodifiableList(ids);
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
    // Case-insensitive match of the given public IP resource id against each
    // public frontend's IP id; equalsIgnoreCase(null) is simply false, so
    // frontends without an assigned IP never match.
    if (pipId != null) {
        for (LoadBalancerPublicFrontend candidate : this.publicFrontends().values()) {
            if (pipId.equalsIgnoreCase(candidate.publicIpAddressId())) {
                return candidate;
            }
        }
    }
    return null;
}
// Overload resolving the frontend by a PublicIpAddress object's resource id; null-safe.
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
return (publicIPAddress != null) ? this.findFrontendByPublicIpAddress(publicIPAddress.id()) : null;
}
// Removes the named outbound rule from this update (no-op if the name is absent).
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
this.outboundRules.remove(name);
return this;
}
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
    // If a rule with this name already exists, hand it back for in-place editing;
    // otherwise start a brand-new definition wrapping a fresh inner model.
    LoadBalancerOutboundRule existing = this.outboundRules.get(name);
    if (existing != null) {
        return (LoadBalancerOutboundRuleImpl) existing;
    }
    return new LoadBalancerOutboundRuleImpl(new OutboundRuleInner().withName(name), this);
}
// Begins an update of an existing outbound rule; returns null if the name is unknown.
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
} |
The question is that when user does not create/update outbound rule, but removed some frontend/backend. Will it make some outbound rule invalid? | protected void beforeCreating() {
if (this.creatablePIPKeys != null) {
for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
if (pip != null) {
withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
}
}
this.creatablePIPKeys.clear();
}
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
SubResource ref = natRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natRule.innerModel().withFrontendIpConfiguration(null);
}
}
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
SubResource ref = natPool.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natPool.innerModel().withFrontendIpConfiguration(null);
}
}
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
if (refs != null && !refs.isEmpty()) {
List<SubResource> existingFrontendIpConfigurations =
refs.stream()
.filter(ref ->
this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
)
.collect(Collectors.toList());
existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
}
SubResource ref = outboundRule.innerModel().backendAddressPool();
if(ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
outboundRule.innerModel().withBackendAddressPool(null);
}
}
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
SubResource ref;
ref = lbRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withFrontendIpConfiguration(null);
}
ref = lbRule.innerModel().backendAddressPool();
if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withBackendAddressPool(null);
}
ref = lbRule.innerModel().probe();
if (ref != null
&& !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withProbe(null);
}
}
} | } | protected void beforeCreating() {
if (this.creatablePIPKeys != null) {
for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
if (pip != null) {
withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
}
}
this.creatablePIPKeys.clear();
}
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
SubResource ref = natRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natRule.innerModel().withFrontendIpConfiguration(null);
}
}
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
SubResource ref = natPool.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natPool.innerModel().withFrontendIpConfiguration(null);
}
}
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
if (refs != null && !refs.isEmpty()) {
List<SubResource> existingFrontendIpConfigurations =
refs.stream()
.filter(ref ->
this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
)
.collect(Collectors.toList());
existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
}
SubResource ref = outboundRule.innerModel().backendAddressPool();
if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
outboundRule.innerModel().withBackendAddressPool(null);
}
}
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
SubResource ref;
ref = lbRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withFrontendIpConfiguration(null);
}
ref = lbRule.innerModel().backendAddressPool();
if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withBackendAddressPool(null);
}
ref = lbRule.innerModel().probe();
if (ref != null
&& !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withProbe(null);
}
}
} | class LoadBalancerImpl
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private final Map<String, String> nicsInBackends = new HashMap<>();
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
return super
.refreshAsync()
.map(
loadBalancer -> {
LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
impl.initializeChildrenFromInner();
return impl;
});
}
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
@Override
protected void initializeChildrenFromInner() {
initializeFrontendsFromInner();
initializeProbesFromInner();
initializeBackendsFromInner();
initializeLoadBalancingRulesFromInner();
initializeInboundNatRulesFromInner();
initializeInboundNatPoolsFromInner();
initializeOutboundRulesFromInner();
}
protected LoadBalancerBackendImpl ensureUniqueBackend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
LoadBalancerBackendImpl backend = this.defineBackend(name);
backend.attach();
return backend;
}
protected SubResource ensureFrontendRef(String name) {
LoadBalancerFrontendImpl frontend;
if (name == null) {
frontend = this.ensureUniqueFrontend();
} else {
frontend = this.defineFrontend(name);
frontend.attach();
}
return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
frontend.attach();
return frontend;
}
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
if (null == networkId || null == subnetName) {
return null;
} else {
for (LoadBalancerPrivateFrontend frontend : this.privateFrontends().values()) {
if (frontend.networkId() == null || frontend.subnetName() == null) {
continue;
} else if (networkId.equalsIgnoreCase(frontend.networkId())
&& subnetName.equalsIgnoreCase(frontend.subnetName())) {
return frontend;
}
}
return null;
}
}
/**
 * Returns an attached private frontend on the given subnet, creating and attaching a
 * dynamically-allocated one if no existing frontend matches.
 *
 * @param networkId resource id of the virtual network; null yields null
 * @param subnetName name of the subnet within that network; null yields null
 * @return an existing or newly attached private frontend, or null for null inputs
 */
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Guard first: the original looked up the frontend before validating the arguments,
    // doing a pointless scan when either argument is null.
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    fe.attach();
    return fe;
}
/**
 * Returns an attached public frontend referencing the given public IP address, creating
 * and attaching one if no existing frontend references it.
 *
 * @param pipId resource id of the public IP address; null yields null
 * @return an existing or newly attached public frontend, or null for a null id
 */
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    // Guard first: the original performed the lookup before the null check,
    // wasting a scan for a null id that can never match.
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    fe.attach();
    return fe;
}
// After the load balancer exists, joins each recorded NIC's primary IP configuration
// to its designated backend pool. NIC update failures are collected and surfaced
// together at the end rather than failing fast.
@Override
protected Mono<Void> afterCreatingAsync() {
if (this.nicsInBackends != null) {
List<Throwable> nicExceptions = new ArrayList<>();
return Flux
.fromIterable(this.nicsInBackends.entrySet())
.flatMap(
nicInBackend -> {
String nicId = nicInBackend.getKey();
String backendName = nicInBackend.getValue();
return this
.manager()
.networkInterfaces()
.getByIdAsync(nicId)
.flatMap(
nic -> {
NicIpConfiguration nicIP = nic.primaryIPConfiguration();
return nic
.update()
.updateIPConfiguration(nicIP.name())
.withExistingLoadBalancerBackend(this, backendName)
.parent()
.applyAsync();
});
})
// Swallow individual failures here so remaining NICs are still processed;
// the collected errors are rethrown as one below.
.onErrorResume(
t -> {
nicExceptions.add(t);
return Mono.empty();
})
.then(
Mono
.defer(
() -> {
if (!nicExceptions.isEmpty()) {
return Mono.error(Exceptions.multiple(nicExceptions));
} else {
// All NICs updated; clear the queue so a re-apply does not repeat the work.
this.nicsInBackends.clear();
return Mono.empty();
}
}));
}
return Mono.empty();
}
@Override
protected Mono<LoadBalancerInner> createInner() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
@Override
public Mono<LoadBalancer> createResourceAsync() {
beforeCreating();
return createInner()
.flatMap(
inner -> {
setInner(inner);
initializeChildrenFromInner();
return afterCreatingAsync().then(this.refreshAsync());
});
}
private void initializeFrontendsFromInner() {
this.frontends = new TreeMap<>();
List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
if (frontendsInner != null) {
for (FrontendIpConfigurationInner frontendInner : frontendsInner) {
LoadBalancerFrontendImpl frontend = new LoadBalancerFrontendImpl(frontendInner, this);
this.frontends.put(frontendInner.name(), frontend);
}
}
}
private void initializeBackendsFromInner() {
this.backends = new TreeMap<>();
List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
if (backendsInner != null) {
for (BackendAddressPoolInner backendInner : backendsInner) {
LoadBalancerBackendImpl backend = new LoadBalancerBackendImpl(backendInner, this);
this.backends.put(backendInner.name(), backend);
}
}
}
// Rebuilds the three probe maps (TCP, HTTP, HTTPS) from the inner model's probe list,
// dispatching each inner probe to the map matching its protocol.
private void initializeProbesFromInner() {
    this.httpProbes = new TreeMap<>();
    this.httpsProbes = new TreeMap<>();
    this.tcpProbes = new TreeMap<>();
    if (this.innerModel().probes() == null) {
        return;
    }
    for (ProbeInner inner : this.innerModel().probes()) {
        LoadBalancerProbeImpl wrapper = new LoadBalancerProbeImpl(inner, this);
        ProbeProtocol protocol = inner.protocol();
        if (protocol.equals(ProbeProtocol.TCP)) {
            this.tcpProbes.put(inner.name(), wrapper);
        } else if (protocol.equals(ProbeProtocol.HTTP)) {
            this.httpProbes.put(inner.name(), wrapper);
        } else if (protocol.equals(ProbeProtocol.HTTPS)) {
            this.httpsProbes.put(inner.name(), wrapper);
        }
    }
}
private void initializeLoadBalancingRulesFromInner() {
this.loadBalancingRules = new TreeMap<>();
List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
if (rulesInner != null) {
for (LoadBalancingRuleInner ruleInner : rulesInner) {
LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
this.loadBalancingRules.put(ruleInner.name(), rule);
}
}
}
private void initializeInboundNatPoolsFromInner() {
this.inboundNatPools = new TreeMap<>();
List<InboundNatPool> inners = this.innerModel().inboundNatPools();
if (inners != null) {
for (InboundNatPool inner : inners) {
LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
this.inboundNatPools.put(wrapper.name(), wrapper);
}
}
}
private void initializeInboundNatRulesFromInner() {
this.inboundNatRules = new TreeMap<>();
List<InboundNatRuleInner> rulesInner = this.innerModel().inboundNatRules();
if (rulesInner != null) {
for (InboundNatRuleInner ruleInner : rulesInner) {
LoadBalancerInboundNatRuleImpl rule = new LoadBalancerInboundNatRuleImpl(ruleInner, this);
this.inboundNatRules.put(ruleInner.name(), rule);
}
}
}
private void initializeOutboundRulesFromInner() {
this.outboundRules = new TreeMap<>();
List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
if (rulesInner != null) {
for (OutboundRuleInner ruleInner : rulesInner) {
LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
this.outboundRules.put(ruleInner.name(), rule);
}
}
}
String futureResourceId() {
return new StringBuilder()
.append(super.resourceIdBase())
.append("/providers/Microsoft.Network/loadBalancers/")
.append(this.name())
.toString();
}
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
if (frontend != null) {
this.frontends.put(frontend.name(), frontend);
}
return this;
}
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
if (probe == null) {
return this;
} else if (probe.protocol() == ProbeProtocol.HTTP) {
httpProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.HTTPS) {
httpsProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.TCP) {
tcpProbes.put(probe.name(), probe);
}
return this;
}
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
if (loadBalancingRule != null) {
this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
}
return this;
}
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
if (inboundNatRule != null) {
this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
}
return this;
}
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
if (inboundNatPool != null) {
this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
}
return this;
}
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
if (outboundRule != null) {
this.outboundRules.put(outboundRule.name(), outboundRule);
}
return this;
}
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
if (backend != null) {
this.backends.put(backend.name(), backend);
}
return this;
}
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
Creatable<PublicIpAddress> creatablePip;
if (super.creatableGroup == null) {
creatablePip =
precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
} else {
creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
}
return withNewPublicIPAddress(creatablePip, frontendName);
}
// Associates a to-be-created public IP with a frontend of this load balancer.
// If no frontend name is given, reuses the frontend already bound to this creatable
// public IP, or generates a uniquely named frontend. Throws IllegalArgumentException
// if the creatable is already bound to a differently named frontend.
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
if (frontendName == null) {
if (existingPipFrontendName != null) {
// Reuse the frontend this pending public IP is already bound to.
frontendName = existingPipFrontendName;
} else {
// First association: generate and attach a uniquely named frontend.
frontendName = ensureUniqueFrontend().name();
}
}
if (existingPipFrontendName == null) {
// Register the creatable as a creation dependency; remember which frontend it feeds
// so beforeCreating() can wire the resulting IP id to that frontend.
this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
} else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
String exceptionMessage =
"This public IP address definition is already associated with a frontend under a different name.";
throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
}
return this;
}
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
if (frontendName == null) {
return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
} else {
return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
}
}
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
if (backendName != null) {
this.defineBackend(backendName).attach();
if (vm.primaryNetworkInterfaceId() != null) {
this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
}
}
return this;
}
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
LoadBalancerProbe probe = this.tcpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
LoadBalancerProbe probe = this.httpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
LoadBalancerProbe probe = this.httpsProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
if (lbRule == null) {
LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
return new LoadBalancingRuleImpl(inner, this);
} else {
return (LoadBalancingRuleImpl) lbRule;
}
}
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
if (natRule == null) {
InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
return new LoadBalancerInboundNatRuleImpl(inner, this);
} else {
return (LoadBalancerInboundNatRuleImpl) natRule;
}
}
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
LoadBalancerInboundNatPool natPool = this.inboundNatPools.get(name);
if (natPool == null) {
InboundNatPool inner = new InboundNatPool().withName(name);
return new LoadBalancerInboundNatPoolImpl(inner, this);
} else {
return (LoadBalancerInboundNatPoolImpl) natPool;
}
}
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
return defineFrontend(name);
}
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
return defineFrontend(name);
}
LoadBalancerFrontendImpl defineFrontend(String name) {
LoadBalancerFrontend frontend = this.frontends.get(name);
if (frontend == null) {
FrontendIpConfigurationInner inner = new FrontendIpConfigurationInner().withName(name);
return new LoadBalancerFrontendImpl(inner, this);
} else {
return (LoadBalancerFrontendImpl) frontend;
}
}
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
LoadBalancerBackend backend = this.backends.get(name);
if (backend == null) {
BackendAddressPoolInner inner = new BackendAddressPoolInner().withName(name);
return new LoadBalancerBackendImpl(inner, this);
} else {
return (LoadBalancerBackendImpl) backend;
}
}
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
this.innerModel().withSku(skuType.sku());
return this;
}
@Override
public LoadBalancerImpl withoutProbe(String name) {
    // Remove the named probe from whichever protocol map holds it, checking
    // HTTP, then HTTPS, then TCP — only the first map containing the name is touched,
    // matching the original first-match semantics.
    if (this.httpProbes.remove(name) == null && this.httpsProbes.remove(name) == null) {
        this.tcpProbes.remove(name);
    }
    return this;
}
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
return (LoadBalancerBackendImpl) this.backends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
this.loadBalancingRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
this.inboundNatRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutBackend(String name) {
this.backends.remove(name);
return this;
}
@Override
public Update withoutInboundNatPool(String name) {
this.inboundNatPools.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutFrontend(String name) {
this.frontends.remove(name);
return this;
}
@Override
public Map<String, LoadBalancerBackend> backends() {
return Collections.unmodifiableMap(this.backends);
}
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
return Collections.unmodifiableMap(this.inboundNatPools);
}
@Override
public LoadBalancerSkuType sku() {
return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
return Collections.unmodifiableMap(this.tcpProbes);
}
@Override
public Map<String, LoadBalancerFrontend> frontends() {
return Collections.unmodifiableMap(this.frontends);
}
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
Map<String, LoadBalancerPrivateFrontend> privateFrontends = new HashMap<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (!frontend.isPublic()) {
privateFrontends.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend);
}
}
return Collections.unmodifiableMap(privateFrontends);
}
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
Map<String, LoadBalancerPublicFrontend> publicFrontends = new HashMap<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (frontend.isPublic()) {
publicFrontends.put(frontend.name(), (LoadBalancerPublicFrontend) frontend);
}
}
return Collections.unmodifiableMap(publicFrontends);
}
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
return Collections.unmodifiableMap(this.inboundNatRules);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
return Collections.unmodifiableMap(this.httpProbes);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
return Collections.unmodifiableMap(this.httpsProbes);
}
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
return Collections.unmodifiableMap(this.loadBalancingRules);
}
@Override
public List<String> publicIpAddressIds() {
List<String> publicIPAddressIds = new ArrayList<>();
for (LoadBalancerFrontend frontend : this.frontends().values()) {
if (frontend.isPublic()) {
String pipId = ((LoadBalancerPublicFrontend) frontend).publicIpAddressId();
publicIPAddressIds.add(pipId);
}
}
return Collections.unmodifiableList(publicIPAddressIds);
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
if (pipId == null) {
return null;
}
for (LoadBalancerPublicFrontend frontend : this.publicFrontends().values()) {
if (frontend.publicIpAddressId() == null) {
continue;
} else if (pipId.equalsIgnoreCase(frontend.publicIpAddressId())) {
return frontend;
}
}
return null;
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
return (publicIPAddress != null) ? this.findFrontendByPublicIpAddress(publicIPAddress.id()) : null;
}
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
this.outboundRules.remove(name);
return this;
}
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
LoadBalancerOutboundRule outboundRule = this.outboundRules.get(name);
if (outboundRule == null) {
OutboundRuleInner inner = new OutboundRuleInner().withName(name);
return new LoadBalancerOutboundRuleImpl(inner, this);
} else {
return (LoadBalancerOutboundRuleImpl) outboundRule;
}
}
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
} | class LoadBalancerImpl
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private final Map<String, String> nicsInBackends = new HashMap<>();
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
return super
.refreshAsync()
.map(
loadBalancer -> {
LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
impl.initializeChildrenFromInner();
return impl;
});
}
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
@Override
protected void initializeChildrenFromInner() {
initializeFrontendsFromInner();
initializeProbesFromInner();
initializeBackendsFromInner();
initializeLoadBalancingRulesFromInner();
initializeInboundNatRulesFromInner();
initializeInboundNatPoolsFromInner();
initializeOutboundRulesFromInner();
}
protected LoadBalancerBackendImpl ensureUniqueBackend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
LoadBalancerBackendImpl backend = this.defineBackend(name);
backend.attach();
return backend;
}
protected SubResource ensureFrontendRef(String name) {
LoadBalancerFrontendImpl frontend;
if (name == null) {
frontend = this.ensureUniqueFrontend();
} else {
frontend = this.defineFrontend(name);
frontend.attach();
}
return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
frontend.attach();
return frontend;
}
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Both coordinates are required to identify a subnet.
    if (networkId == null || subnetName == null) {
        return null;
    }
    // Scan the private frontends for one bound to the same network + subnet
    // (case-insensitive). equalsIgnoreCase(null) is false, so frontends with
    // missing network/subnet information are skipped naturally.
    for (LoadBalancerPrivateFrontend frontend : this.privateFrontends().values()) {
        boolean sameNetwork = networkId.equalsIgnoreCase(frontend.networkId());
        boolean sameSubnet = subnetName.equalsIgnoreCase(frontend.subnetName());
        if (sameNetwork && sameSubnet) {
            return frontend;
        }
    }
    return null;
}
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Validate inputs before doing any lookup work; the original performed the
    // (potentially full-map) search even when an argument was null and the
    // result could never be used.
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    // No existing frontend references this subnet: create and attach one with
    // a dynamically allocated private IP address.
    LoadBalancerFrontendImpl fe =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    fe.attach();
    return fe;
}
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    // Check the argument before searching; the original searched first and
    // discarded the result when pipId was null.
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    // No frontend references this public IP yet: create and attach one.
    LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    fe.attach();
    return fe;
}
@Override
protected Mono<Void> afterCreatingAsync() {
if (this.nicsInBackends != null) {
List<Throwable> nicExceptions = new ArrayList<>();
return Flux
.fromIterable(this.nicsInBackends.entrySet())
.flatMap(
nicInBackend -> {
String nicId = nicInBackend.getKey();
String backendName = nicInBackend.getValue();
return this
.manager()
.networkInterfaces()
.getByIdAsync(nicId)
.flatMap(
nic -> {
NicIpConfiguration nicIP = nic.primaryIPConfiguration();
return nic
.update()
.updateIPConfiguration(nicIP.name())
.withExistingLoadBalancerBackend(this, backendName)
.parent()
.applyAsync();
});
})
.onErrorResume(
t -> {
nicExceptions.add(t);
return Mono.empty();
})
.then(
Mono
.defer(
() -> {
if (!nicExceptions.isEmpty()) {
return Mono.error(Exceptions.multiple(nicExceptions));
} else {
this.nicsInBackends.clear();
return Mono.empty();
}
}));
}
return Mono.empty();
}
@Override
protected Mono<LoadBalancerInner> createInner() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
@Override
public Mono<LoadBalancer> createResourceAsync() {
beforeCreating();
return createInner()
.flatMap(
inner -> {
setInner(inner);
initializeChildrenFromInner();
return afterCreatingAsync().then(this.refreshAsync());
});
}
private void initializeFrontendsFromInner() {
this.frontends = new TreeMap<>();
List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
if (frontendsInner != null) {
for (FrontendIpConfigurationInner frontendInner : frontendsInner) {
LoadBalancerFrontendImpl frontend = new LoadBalancerFrontendImpl(frontendInner, this);
this.frontends.put(frontendInner.name(), frontend);
}
}
}
private void initializeBackendsFromInner() {
this.backends = new TreeMap<>();
List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
if (backendsInner != null) {
for (BackendAddressPoolInner backendInner : backendsInner) {
LoadBalancerBackendImpl backend = new LoadBalancerBackendImpl(backendInner, this);
this.backends.put(backendInner.name(), backend);
}
}
}
private void initializeProbesFromInner() {
    // Rebuild the per-protocol probe maps from the inner model; TreeMap keeps
    // probe names sorted for deterministic iteration.
    this.httpProbes = new TreeMap<>();
    this.httpsProbes = new TreeMap<>();
    this.tcpProbes = new TreeMap<>();
    if (this.innerModel().probes() != null) {
        for (ProbeInner probeInner : this.innerModel().probes()) {
            LoadBalancerProbeImpl probe = new LoadBalancerProbeImpl(probeInner, this);
            // Constant-first equals avoids an NPE if a probe ever comes back
            // without a protocol; such probes are simply not indexed (the
            // original would have thrown on probeInner.protocol().equals(...)).
            if (ProbeProtocol.TCP.equals(probeInner.protocol())) {
                this.tcpProbes.put(probeInner.name(), probe);
            } else if (ProbeProtocol.HTTP.equals(probeInner.protocol())) {
                this.httpProbes.put(probeInner.name(), probe);
            } else if (ProbeProtocol.HTTPS.equals(probeInner.protocol())) {
                this.httpsProbes.put(probeInner.name(), probe);
            }
        }
    }
}
private void initializeLoadBalancingRulesFromInner() {
this.loadBalancingRules = new TreeMap<>();
List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
if (rulesInner != null) {
for (LoadBalancingRuleInner ruleInner : rulesInner) {
LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
this.loadBalancingRules.put(ruleInner.name(), rule);
}
}
}
private void initializeInboundNatPoolsFromInner() {
this.inboundNatPools = new TreeMap<>();
List<InboundNatPool> inners = this.innerModel().inboundNatPools();
if (inners != null) {
for (InboundNatPool inner : inners) {
LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
this.inboundNatPools.put(wrapper.name(), wrapper);
}
}
}
private void initializeInboundNatRulesFromInner() {
this.inboundNatRules = new TreeMap<>();
List<InboundNatRuleInner> rulesInner = this.innerModel().inboundNatRules();
if (rulesInner != null) {
for (InboundNatRuleInner ruleInner : rulesInner) {
LoadBalancerInboundNatRuleImpl rule = new LoadBalancerInboundNatRuleImpl(ruleInner, this);
this.inboundNatRules.put(ruleInner.name(), rule);
}
}
}
private void initializeOutboundRulesFromInner() {
this.outboundRules = new TreeMap<>();
List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
if (rulesInner != null) {
for (OutboundRuleInner ruleInner : rulesInner) {
LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
this.outboundRules.put(ruleInner.name(), rule);
}
}
}
String futureResourceId() {
    // Resource ID the load balancer will have once created; used to build
    // self-references (e.g. frontend refs) before the resource exists.
    // Plain concatenation of a fixed three-part string is clearer than the
    // original StringBuilder chain and compiles to the same thing.
    return super.resourceIdBase() + "/providers/Microsoft.Network/loadBalancers/" + this.name();
}
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
if (frontend != null) {
this.frontends.put(frontend.name(), frontend);
}
return this;
}
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
if (probe == null) {
return this;
} else if (probe.protocol() == ProbeProtocol.HTTP) {
httpProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.HTTPS) {
httpsProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.TCP) {
tcpProbes.put(probe.name(), probe);
}
return this;
}
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
if (loadBalancingRule != null) {
this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
}
return this;
}
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
if (inboundNatRule != null) {
this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
}
return this;
}
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
if (inboundNatPool != null) {
this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
}
return this;
}
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
if (outboundRule != null) {
this.outboundRules.put(outboundRule.name(), outboundRule);
}
return this;
}
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
if (backend != null) {
this.backends.put(backend.name(), backend);
}
return this;
}
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
Creatable<PublicIpAddress> creatablePip;
if (super.creatableGroup == null) {
creatablePip =
precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
} else {
creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
}
return withNewPublicIPAddress(creatablePip, frontendName);
}
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
if (frontendName == null) {
if (existingPipFrontendName != null) {
frontendName = existingPipFrontendName;
} else {
frontendName = ensureUniqueFrontend().name();
}
}
if (existingPipFrontendName == null) {
this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
} else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
String exceptionMessage =
"This public IP address definition is already associated with a frontend under a different name.";
throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
}
return this;
}
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
if (frontendName == null) {
return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
} else {
return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
}
}
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
if (backendName != null) {
this.defineBackend(backendName).attach();
if (vm.primaryNetworkInterfaceId() != null) {
this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
}
}
return this;
}
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
LoadBalancerProbe probe = this.tcpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
LoadBalancerProbe probe = this.httpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
LoadBalancerProbe probe = this.httpsProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
if (lbRule == null) {
LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
return new LoadBalancingRuleImpl(inner, this);
} else {
return (LoadBalancingRuleImpl) lbRule;
}
}
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
if (natRule == null) {
InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
return new LoadBalancerInboundNatRuleImpl(inner, this);
} else {
return (LoadBalancerInboundNatRuleImpl) natRule;
}
}
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
LoadBalancerInboundNatPool natPool = this.inboundNatPools.get(name);
if (natPool == null) {
InboundNatPool inner = new InboundNatPool().withName(name);
return new LoadBalancerInboundNatPoolImpl(inner, this);
} else {
return (LoadBalancerInboundNatPoolImpl) natPool;
}
}
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
return defineFrontend(name);
}
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
return defineFrontend(name);
}
LoadBalancerFrontendImpl defineFrontend(String name) {
LoadBalancerFrontend frontend = this.frontends.get(name);
if (frontend == null) {
FrontendIpConfigurationInner inner = new FrontendIpConfigurationInner().withName(name);
return new LoadBalancerFrontendImpl(inner, this);
} else {
return (LoadBalancerFrontendImpl) frontend;
}
}
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
LoadBalancerBackend backend = this.backends.get(name);
if (backend == null) {
BackendAddressPoolInner inner = new BackendAddressPoolInner().withName(name);
return new LoadBalancerBackendImpl(inner, this);
} else {
return (LoadBalancerBackendImpl) backend;
}
}
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
this.innerModel().withSku(skuType.sku());
return this;
}
@Override
public LoadBalancerImpl withoutProbe(String name) {
    // Map.remove both tests for and removes the entry in a single lookup,
    // replacing the original containsKey + remove double lookup. The chain
    // preserves the original "first matching map only" semantics in case the
    // same name were ever present in more than one map.
    if (this.httpProbes.remove(name) == null && this.httpsProbes.remove(name) == null) {
        this.tcpProbes.remove(name);
    }
    return this;
}
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
return (LoadBalancerBackendImpl) this.backends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
this.loadBalancingRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
this.inboundNatRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutBackend(String name) {
this.backends.remove(name);
return this;
}
@Override
public Update withoutInboundNatPool(String name) {
this.inboundNatPools.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutFrontend(String name) {
this.frontends.remove(name);
return this;
}
@Override
public Map<String, LoadBalancerBackend> backends() {
return Collections.unmodifiableMap(this.backends);
}
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
return Collections.unmodifiableMap(this.inboundNatPools);
}
@Override
public LoadBalancerSkuType sku() {
return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}
@Override
public Map<String, LoadBalancerOutboundRule> outboundRules() {
return Collections.unmodifiableMap(this.outboundRules);
}
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
return Collections.unmodifiableMap(this.tcpProbes);
}
@Override
public Map<String, LoadBalancerFrontend> frontends() {
return Collections.unmodifiableMap(this.frontends);
}
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
    // Filter the full frontend map down to the non-public (private) entries,
    // exposed as a read-only snapshot.
    Map<String, LoadBalancerPrivateFrontend> result = new HashMap<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (frontend.isPublic()) {
            continue;
        }
        result.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend);
    }
    return Collections.unmodifiableMap(result);
}
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
    // Filter the full frontend map down to the public entries, exposed as a
    // read-only snapshot.
    Map<String, LoadBalancerPublicFrontend> result = new HashMap<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (!frontend.isPublic()) {
            continue;
        }
        result.put(frontend.name(), (LoadBalancerPublicFrontend) frontend);
    }
    return Collections.unmodifiableMap(result);
}
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
return Collections.unmodifiableMap(this.inboundNatRules);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
return Collections.unmodifiableMap(this.httpProbes);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
return Collections.unmodifiableMap(this.httpsProbes);
}
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
return Collections.unmodifiableMap(this.loadBalancingRules);
}
@Override
public List<String> publicIpAddressIds() {
    // One entry per public frontend, in the frontend map's iteration order;
    // whatever publicIpAddressId() returns is added without filtering.
    List<String> ids = new ArrayList<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (!frontend.isPublic()) {
            continue;
        }
        ids.add(((LoadBalancerPublicFrontend) frontend).publicIpAddressId());
    }
    return Collections.unmodifiableList(ids);
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
    if (pipId == null) {
        return null;
    }
    // Scan the public frontends for one referencing the given public IP id
    // (case-insensitive); frontends with no PIP reference are skipped.
    for (LoadBalancerPublicFrontend frontend : this.publicFrontends().values()) {
        String candidateId = frontend.publicIpAddressId();
        if (candidateId != null && pipId.equalsIgnoreCase(candidateId)) {
            return frontend;
        }
    }
    return null;
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
return (publicIPAddress != null) ? this.findFrontendByPublicIpAddress(publicIPAddress.id()) : null;
}
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
this.outboundRules.remove(name);
return this;
}
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
LoadBalancerOutboundRule outboundRule = this.outboundRules.get(name);
if (outboundRule == null) {
OutboundRuleInner inner = new OutboundRuleInner().withName(name);
return new LoadBalancerOutboundRuleImpl(inner, this);
} else {
return (LoadBalancerOutboundRuleImpl) outboundRule;
}
}
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
} |
If the user uses the code below to update a load balancer whose outbound rule is associated with a frontend, the server will delete the frontend but keep the outbound rule, which is then left with no frontend. ``` loadBalancer1 .update() .withoutFrontend(frontendName2) .apply(); ``` | protected void beforeCreating() {
// Replace any creatable-PIP placeholders with concrete frontend associations
// now that the public IP addresses have been created.
if (this.creatablePIPKeys != null) {
    for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
        PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
        if (pip != null) {
            withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
        }
    }
    this.creatablePIPKeys.clear();
}
// Rebuild each inner-model collection from the child wrapper maps, defaulting
// to an empty list so removed children are actually deleted server-side.
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
    innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
    innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
    innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
    innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
// Drop NAT-rule references to frontends that were removed in this update.
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
    SubResource ref = natRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natRule.innerModel().withFrontendIpConfiguration(null);
    }
}
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
    innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
    SubResource ref = natPool.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        natPool.innerModel().withFrontendIpConfiguration(null);
    }
}
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
    innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
    // Keep only frontend references that still exist; if none survive, the
    // list is set to null so no dangling association remains on the rule.
    List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
    if (refs != null && !refs.isEmpty()) {
        List<SubResource> existingFrontendIpConfigurations =
            refs.stream()
                .filter(ref ->
                    this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
                )
                .collect(Collectors.toList());
        existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
        outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
    }
    SubResource ref = outboundRule.innerModel().backendAddressPool();
    // (fixed: "if(" -> "if (" to match the file's formatting everywhere else)
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        outboundRule.innerModel().withBackendAddressPool(null);
    }
}
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
    innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
// Null out LB-rule references to frontends/backends/probes that no longer exist.
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
    SubResource ref;
    ref = lbRule.innerModel().frontendIpConfiguration();
    if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withFrontendIpConfiguration(null);
    }
    ref = lbRule.innerModel().backendAddressPool();
    if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withBackendAddressPool(null);
    }
    ref = lbRule.innerModel().probe();
    if (ref != null
        && !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
        && !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
        lbRule.innerModel().withProbe(null);
    }
}
} | } | protected void beforeCreating() {
if (this.creatablePIPKeys != null) {
for (Entry<String, String> pipFrontendAssociation : this.creatablePIPKeys.entrySet()) {
PublicIpAddress pip = this.<PublicIpAddress>taskResult(pipFrontendAssociation.getKey());
if (pip != null) {
withExistingPublicIPAddress(pip.id(), pipFrontendAssociation.getValue());
}
}
this.creatablePIPKeys.clear();
}
List<ProbeInner> innerProbes = innersFromWrappers(this.httpProbes.values());
innerProbes = innersFromWrappers(this.httpsProbes.values(), innerProbes);
innerProbes = innersFromWrappers(this.tcpProbes.values(), innerProbes);
if (innerProbes == null) {
innerProbes = new ArrayList<>();
}
this.innerModel().withProbes(innerProbes);
List<BackendAddressPoolInner> innerBackends = innersFromWrappers(this.backends.values());
if (null == innerBackends) {
innerBackends = new ArrayList<>();
}
this.innerModel().withBackendAddressPools(innerBackends);
List<FrontendIpConfigurationInner> innerFrontends = innersFromWrappers(this.frontends.values());
if (null == innerFrontends) {
innerFrontends = new ArrayList<>();
}
this.innerModel().withFrontendIpConfigurations(innerFrontends);
List<InboundNatRuleInner> innerNatRules = innersFromWrappers(this.inboundNatRules.values());
if (null == innerNatRules) {
innerNatRules = new ArrayList<>();
}
this.innerModel().withInboundNatRules(innerNatRules);
for (LoadBalancerInboundNatRule natRule : this.inboundNatRules.values()) {
SubResource ref = natRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natRule.innerModel().withFrontendIpConfiguration(null);
}
}
List<InboundNatPool> innerNatPools = innersFromWrappers(this.inboundNatPools.values());
if (null == innerNatPools) {
innerNatPools = new ArrayList<>();
}
this.innerModel().withInboundNatPools(innerNatPools);
for (LoadBalancerInboundNatPool natPool : this.inboundNatPools.values()) {
SubResource ref = natPool.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
natPool.innerModel().withFrontendIpConfiguration(null);
}
}
List<OutboundRuleInner> innerOutboundRules = innersFromWrappers(this.outboundRules.values());
if (null == innerOutboundRules) {
innerOutboundRules = new ArrayList<>();
}
this.innerModel().withOutboundRules(innerOutboundRules);
for (LoadBalancerOutboundRule outboundRule : this.outboundRules.values()) {
List<SubResource> refs = outboundRule.innerModel().frontendIpConfigurations();
if (refs != null && !refs.isEmpty()) {
List<SubResource> existingFrontendIpConfigurations =
refs.stream()
.filter(ref ->
this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
)
.collect(Collectors.toList());
existingFrontendIpConfigurations = existingFrontendIpConfigurations.isEmpty() ? null : existingFrontendIpConfigurations;
outboundRule.innerModel().withFrontendIpConfigurations(existingFrontendIpConfigurations);
}
SubResource ref = outboundRule.innerModel().backendAddressPool();
if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
outboundRule.innerModel().withBackendAddressPool(null);
}
}
List<LoadBalancingRuleInner> innerRules = innersFromWrappers(this.loadBalancingRules.values());
if (innerRules == null) {
innerRules = new ArrayList<>();
}
this.innerModel().withLoadBalancingRules(innerRules);
for (LoadBalancingRule lbRule : this.loadBalancingRules.values()) {
SubResource ref;
ref = lbRule.innerModel().frontendIpConfiguration();
if (ref != null && !this.frontends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withFrontendIpConfiguration(null);
}
ref = lbRule.innerModel().backendAddressPool();
if (ref != null && !this.backends().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withBackendAddressPool(null);
}
ref = lbRule.innerModel().probe();
if (ref != null
&& !this.httpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.httpsProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))
&& !this.tcpProbes().containsKey(ResourceUtils.nameFromResourceId(ref.id()))) {
lbRule.innerModel().withProbe(null);
}
}
} | class LoadBalancerImpl
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private final Map<String, String> nicsInBackends = new HashMap<>();
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
return super
.refreshAsync()
.map(
loadBalancer -> {
LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
impl.initializeChildrenFromInner();
return impl;
});
}
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
@Override
protected void initializeChildrenFromInner() {
initializeFrontendsFromInner();
initializeProbesFromInner();
initializeBackendsFromInner();
initializeLoadBalancingRulesFromInner();
initializeInboundNatRulesFromInner();
initializeInboundNatPoolsFromInner();
initializeOutboundRulesFromInner();
}
/** Defines and attaches a backend with a randomly generated unique name. */
protected LoadBalancerBackendImpl ensureUniqueBackend() {
    String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
    LoadBalancerBackendImpl backend = this.defineBackend(name);
    backend.attach();
    return backend;
}

/**
 * Returns a SubResource reference to the named frontend, creating and attaching
 * the frontend first (with a generated name when {@code name} is null).
 */
protected SubResource ensureFrontendRef(String name) {
    LoadBalancerFrontendImpl frontend;
    if (name == null) {
        frontend = this.ensureUniqueFrontend();
    } else {
        frontend = this.defineFrontend(name);
        frontend.attach();
    }
    // The id is built from the future resource id since the LB may not exist yet.
    return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}

/** Defines and attaches a frontend with a randomly generated unique name. */
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
    String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
    LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
    frontend.attach();
    return frontend;
}
/**
 * Finds the private frontend bound to the given network/subnet pair,
 * or returns null when the inputs are null or no match exists.
 */
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
    if (networkId == null || subnetName == null) {
        return null;
    }
    for (LoadBalancerPrivateFrontend candidate : this.privateFrontends().values()) {
        // equalsIgnoreCase(null) is false, so frontends missing either value are skipped.
        if (networkId.equalsIgnoreCase(candidate.networkId())
            && subnetName.equalsIgnoreCase(candidate.subnetName())) {
            return candidate;
        }
    }
    return null;
}
/**
 * Returns the existing private frontend for the given network/subnet pair,
 * or defines, attaches and returns a new dynamically-allocated one.
 *
 * @param networkId the virtual network id; null yields null
 * @param subnetName the subnet name; null yields null
 */
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Fail fast on missing input before scanning the existing frontends
    // (the original looked the frontend up first, wasting the scan).
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    fe.attach();
    return fe;
}
/**
 * Returns the existing public frontend referencing the given public IP id,
 * or defines, attaches and returns a new one.
 *
 * @param pipId the public IP address resource id; null yields null
 */
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    // Fail fast on missing input before scanning the existing frontends
    // (the original performed the lookup even for a null id).
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    fe.attach();
    return fe;
}
/**
 * Post-creation step: points the primary IP configuration of each network interface
 * recorded in {@code nicsInBackends} at its designated backend pool on this load balancer.
 * Individual NIC update failures are collected and surfaced together as one composite
 * error; the pending map is cleared only when every NIC update succeeds.
 */
@Override
protected Mono<Void> afterCreatingAsync() {
    if (this.nicsInBackends != null) {
        List<Throwable> nicExceptions = new ArrayList<>();
        return Flux
            .fromIterable(this.nicsInBackends.entrySet())
            .flatMap(
                nicInBackend -> {
                    String nicId = nicInBackend.getKey();
                    String backendName = nicInBackend.getValue();
                    return this
                        .manager()
                        .networkInterfaces()
                        .getByIdAsync(nicId)
                        .flatMap(
                            nic -> {
                                // Associate the NIC's primary IP config with the backend pool.
                                NicIpConfiguration nicIP = nic.primaryIPConfiguration();
                                return nic
                                    .update()
                                    .updateIPConfiguration(nicIP.name())
                                    .withExistingLoadBalancerBackend(this, backendName)
                                    .parent()
                                    .applyAsync();
                            });
                })
            .onErrorResume(
                t -> {
                    // Record the failure and continue with the remaining NICs.
                    nicExceptions.add(t);
                    return Mono.empty();
                })
            .then(
                Mono
                    .defer(
                        () -> {
                            if (!nicExceptions.isEmpty()) {
                                return Mono.error(Exceptions.multiple(nicExceptions));
                            } else {
                                this.nicsInBackends.clear();
                                return Mono.empty();
                            }
                        }));
    }
    return Mono.empty();
}
/** Issues the create-or-update PUT for this load balancer's inner model. */
@Override
protected Mono<LoadBalancerInner> createInner() {
    return this.manager().serviceClient().getLoadBalancers()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
/**
 * Creates the load balancer: prepares the inner model, issues the PUT, rebuilds the
 * child caches, runs post-creation NIC wiring, then refreshes so the returned
 * resource reflects the final service-side state.
 */
@Override
public Mono<LoadBalancer> createResourceAsync() {
    beforeCreating();
    return createInner()
        .flatMap(
            inner -> {
                setInner(inner);
                initializeChildrenFromInner();
                return afterCreatingAsync().then(this.refreshAsync());
            });
}
/** Rebuilds the frontend cache from the inner model, keyed by frontend name. */
private void initializeFrontendsFromInner() {
    this.frontends = new TreeMap<>();
    List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
    if (frontendsInner != null) {
        for (FrontendIpConfigurationInner frontendInner : frontendsInner) {
            LoadBalancerFrontendImpl frontend = new LoadBalancerFrontendImpl(frontendInner, this);
            this.frontends.put(frontendInner.name(), frontend);
        }
    }
}

/** Rebuilds the backend cache from the inner model, keyed by backend name. */
private void initializeBackendsFromInner() {
    this.backends = new TreeMap<>();
    List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
    if (backendsInner != null) {
        for (BackendAddressPoolInner backendInner : backendsInner) {
            LoadBalancerBackendImpl backend = new LoadBalancerBackendImpl(backendInner, this);
            this.backends.put(backendInner.name(), backend);
        }
    }
}

/**
 * Rebuilds the three protocol-specific probe caches from the inner model.
 * Probes with a protocol other than TCP/HTTP/HTTPS are not cached.
 * NOTE(review): assumes probeInner.protocol() is non-null — TODO confirm service contract.
 */
private void initializeProbesFromInner() {
    this.httpProbes = new TreeMap<>();
    this.httpsProbes = new TreeMap<>();
    this.tcpProbes = new TreeMap<>();
    if (this.innerModel().probes() != null) {
        for (ProbeInner probeInner : this.innerModel().probes()) {
            LoadBalancerProbeImpl probe = new LoadBalancerProbeImpl(probeInner, this);
            if (probeInner.protocol().equals(ProbeProtocol.TCP)) {
                this.tcpProbes.put(probeInner.name(), probe);
            } else if (probeInner.protocol().equals(ProbeProtocol.HTTP)) {
                this.httpProbes.put(probeInner.name(), probe);
            } else if (probeInner.protocol().equals(ProbeProtocol.HTTPS)) {
                this.httpsProbes.put(probeInner.name(), probe);
            }
        }
    }
}

/** Rebuilds the load balancing rule cache from the inner model, keyed by rule name. */
private void initializeLoadBalancingRulesFromInner() {
    this.loadBalancingRules = new TreeMap<>();
    List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
    if (rulesInner != null) {
        for (LoadBalancingRuleInner ruleInner : rulesInner) {
            LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
            this.loadBalancingRules.put(ruleInner.name(), rule);
        }
    }
}

/** Rebuilds the inbound NAT pool cache from the inner model, keyed by pool name. */
private void initializeInboundNatPoolsFromInner() {
    this.inboundNatPools = new TreeMap<>();
    List<InboundNatPool> inners = this.innerModel().inboundNatPools();
    if (inners != null) {
        for (InboundNatPool inner : inners) {
            LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
            this.inboundNatPools.put(wrapper.name(), wrapper);
        }
    }
}

/** Rebuilds the inbound NAT rule cache from the inner model, keyed by rule name. */
private void initializeInboundNatRulesFromInner() {
    this.inboundNatRules = new TreeMap<>();
    List<InboundNatRuleInner> rulesInner = this.innerModel().inboundNatRules();
    if (rulesInner != null) {
        for (InboundNatRuleInner ruleInner : rulesInner) {
            LoadBalancerInboundNatRuleImpl rule = new LoadBalancerInboundNatRuleImpl(ruleInner, this);
            this.inboundNatRules.put(ruleInner.name(), rule);
        }
    }
}

/** Rebuilds the outbound rule cache from the inner model, keyed by rule name. */
private void initializeOutboundRulesFromInner() {
    this.outboundRules = new TreeMap<>();
    List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
    if (rulesInner != null) {
        for (OutboundRuleInner ruleInner : rulesInner) {
            LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
            this.outboundRules.put(ruleInner.name(), rule);
        }
    }
}
/**
 * Computes the resource id this load balancer will have once created,
 * usable for building child sub-resource references before the PUT completes.
 */
String futureResourceId() {
    // Plain concatenation; the StringBuilder chain added no value for a fixed 3-part join.
    return super.resourceIdBase() + "/providers/Microsoft.Network/loadBalancers/" + this.name();
}
/** Adds (or replaces by name) a frontend in the cache; no-op when null. */
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
    if (frontend != null) {
        this.frontends.put(frontend.name(), frontend);
    }
    return this;
}

/**
 * Adds (or replaces by name) a probe in the cache matching its protocol;
 * probes with other protocols, or null, are ignored.
 */
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
    if (probe == null) {
        return this;
    } else if (probe.protocol() == ProbeProtocol.HTTP) {
        httpProbes.put(probe.name(), probe);
    } else if (probe.protocol() == ProbeProtocol.HTTPS) {
        httpsProbes.put(probe.name(), probe);
    } else if (probe.protocol() == ProbeProtocol.TCP) {
        tcpProbes.put(probe.name(), probe);
    }
    return this;
}

/** Adds (or replaces by name) a load balancing rule in the cache; no-op when null. */
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
    if (loadBalancingRule != null) {
        this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
    }
    return this;
}

/** Adds (or replaces by name) an inbound NAT rule in the cache; no-op when null. */
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
    if (inboundNatRule != null) {
        this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
    }
    return this;
}

/** Adds (or replaces by name) an inbound NAT pool in the cache; no-op when null. */
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
    if (inboundNatPool != null) {
        this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
    }
    return this;
}

/** Adds (or replaces by name) an outbound rule in the cache; no-op when null. */
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
    if (outboundRule != null) {
        this.outboundRules.put(outboundRule.name(), outboundRule);
    }
    return this;
}

/** Adds (or replaces by name) a backend in the cache; no-op when null. */
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
    if (backend != null) {
        this.backends.put(backend.name(), backend);
    }
    return this;
}
/**
 * Associates a new public IP address (named after and labeled with {@code dnsLeafLabel})
 * with the given frontend. The PIP is placed in this LB's existing resource group, or in
 * the group being created alongside the LB when one is pending.
 */
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
    PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
        manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
    Creatable<PublicIpAddress> creatablePip;
    if (super.creatableGroup == null) {
        creatablePip =
            precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
    } else {
        creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
    }
    return withNewPublicIPAddress(creatablePip, frontendName);
}

/**
 * Registers a creatable public IP as a creation dependency and records which frontend
 * it belongs to (generating a frontend when {@code frontendName} is null).
 *
 * @throws IllegalArgumentException if the same PIP definition was already associated
 *     with a differently named frontend
 */
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
    String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
    if (frontendName == null) {
        if (existingPipFrontendName != null) {
            // Reuse the frontend this PIP was previously mapped to.
            frontendName = existingPipFrontendName;
        } else {
            frontendName = ensureUniqueFrontend().name();
        }
    }
    if (existingPipFrontendName == null) {
        this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
    } else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
        String exceptionMessage =
            "This public IP address definition is already associated with a frontend under a different name.";
        throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
    }
    return this;
}

/**
 * Associates an existing public IP address with the named frontend,
 * generating a uniquely named frontend when {@code frontendName} is null.
 */
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
    if (frontendName == null) {
        return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
    } else {
        return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
    }
}

/**
 * Records the VM's primary network interface for association with the named backend
 * during {@code afterCreatingAsync}; no-op when {@code backendName} is null.
 */
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
    if (backendName != null) {
        this.defineBackend(backendName).attach();
        if (vm.primaryNetworkInterfaceId() != null) {
            // Backend names are stored lowercased for case-insensitive matching.
            this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
        }
    }
    return this;
}
/** Begins definition of a new TCP probe, or returns the existing one for in-place update. */
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
    LoadBalancerProbe probe = this.tcpProbes.get(name);
    if (probe == null) {
        ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
        return new LoadBalancerProbeImpl(inner, this);
    } else {
        return (LoadBalancerProbeImpl) probe;
    }
}

/** Begins definition of a new HTTP probe (default port 80), or returns the existing one. */
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
    LoadBalancerProbe probe = this.httpProbes.get(name);
    if (probe == null) {
        ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
        return new LoadBalancerProbeImpl(inner, this);
    } else {
        return (LoadBalancerProbeImpl) probe;
    }
}

/** Begins definition of a new HTTPS probe (default port 443), or returns the existing one. */
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
    LoadBalancerProbe probe = this.httpsProbes.get(name);
    if (probe == null) {
        ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
        return new LoadBalancerProbeImpl(inner, this);
    } else {
        return (LoadBalancerProbeImpl) probe;
    }
}

/** Begins definition of a new load balancing rule, or returns the existing one. */
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
    LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
    if (lbRule == null) {
        LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
        return new LoadBalancingRuleImpl(inner, this);
    } else {
        return (LoadBalancingRuleImpl) lbRule;
    }
}

/** Begins definition of a new inbound NAT rule, or returns the existing one. */
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
    LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
    if (natRule == null) {
        InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
        return new LoadBalancerInboundNatRuleImpl(inner, this);
    } else {
        return (LoadBalancerInboundNatRuleImpl) natRule;
    }
}

/** Begins definition of a new inbound NAT pool, or returns the existing one. */
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
    LoadBalancerInboundNatPool natPool = this.inboundNatPools.get(name);
    if (natPool == null) {
        InboundNatPool inner = new InboundNatPool().withName(name);
        return new LoadBalancerInboundNatPoolImpl(inner, this);
    } else {
        return (LoadBalancerInboundNatPoolImpl) natPool;
    }
}

/** Begins definition of a private frontend; delegates to the shared frontend definition. */
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
    return defineFrontend(name);
}

/** Begins definition of a public frontend; delegates to the shared frontend definition. */
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
    return defineFrontend(name);
}

/** Begins definition of a new frontend, or returns the existing one for in-place update. */
LoadBalancerFrontendImpl defineFrontend(String name) {
    LoadBalancerFrontend frontend = this.frontends.get(name);
    if (frontend == null) {
        FrontendIpConfigurationInner inner = new FrontendIpConfigurationInner().withName(name);
        return new LoadBalancerFrontendImpl(inner, this);
    } else {
        return (LoadBalancerFrontendImpl) frontend;
    }
}

/** Begins definition of a new backend, or returns the existing one for in-place update. */
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
    LoadBalancerBackend backend = this.backends.get(name);
    if (backend == null) {
        BackendAddressPoolInner inner = new BackendAddressPoolInner().withName(name);
        return new LoadBalancerBackendImpl(inner, this);
    } else {
        return (LoadBalancerBackendImpl) backend;
    }
}

/** Sets the load balancer SKU on the inner model. */
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
    this.innerModel().withSku(skuType.sku());
    return this;
}
/**
 * Removes the named probe from whichever protocol cache contains it.
 * Only the first cache containing the name (HTTP, then HTTPS, then TCP) is
 * modified, matching the original if/else-if semantics.
 */
@Override
public LoadBalancerImpl withoutProbe(String name) {
    // Map.remove returns null when absent, so one lookup per map replaces the
    // original containsKey + remove double lookup.
    if (this.httpProbes.remove(name) == null && this.httpsProbes.remove(name) == null) {
        this.tcpProbes.remove(name);
    }
    return this;
}
/** Begins update of a TCP probe; returns null when no probe with that name is cached. */
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
    return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}

/** Begins update of a backend; returns null when no backend with that name is cached. */
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
    return (LoadBalancerBackendImpl) this.backends.get(name);
}

/** Begins update of a public frontend; returns null when no frontend with that name is cached. */
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
    return (LoadBalancerFrontendImpl) this.frontends.get(name);
}

/** Begins update of a private frontend; returns null when no frontend with that name is cached. */
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
    return (LoadBalancerFrontendImpl) this.frontends.get(name);
}

/** Begins update of an inbound NAT rule; returns null when no rule with that name is cached. */
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
    return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}

/** Begins update of an inbound NAT pool; returns null when no pool with that name is cached. */
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
    return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}

/** Begins update of an HTTP probe; returns null when no probe with that name is cached. */
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
    return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}

/** Begins update of an HTTPS probe; returns null when no probe with that name is cached. */
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
    return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}

/** Begins update of a load balancing rule; returns null when no rule with that name is cached. */
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
    return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}

/** Removes the named load balancing rule from the cache. */
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
    this.loadBalancingRules.remove(name);
    return this;
}

/** Removes the named inbound NAT rule from the cache. */
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
    this.inboundNatRules.remove(name);
    return this;
}

/** Removes the named backend from the cache. */
@Override
public LoadBalancerImpl withoutBackend(String name) {
    this.backends.remove(name);
    return this;
}

/** Removes the named inbound NAT pool from the cache. */
@Override
public Update withoutInboundNatPool(String name) {
    this.inboundNatPools.remove(name);
    return this;
}

/** Removes the named frontend from the cache. */
@Override
public LoadBalancerImpl withoutFrontend(String name) {
    this.frontends.remove(name);
    return this;
}
/** @return an unmodifiable view of the cached backends, keyed by name */
@Override
public Map<String, LoadBalancerBackend> backends() {
    return Collections.unmodifiableMap(this.backends);
}

/** @return an unmodifiable view of the cached inbound NAT pools, keyed by name */
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
    return Collections.unmodifiableMap(this.inboundNatPools);
}

/** @return the SKU type derived from the inner model's SKU */
@Override
public LoadBalancerSkuType sku() {
    return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}

/** @return an unmodifiable view of the cached TCP probes, keyed by name */
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
    return Collections.unmodifiableMap(this.tcpProbes);
}

/** @return an unmodifiable view of all cached frontends, keyed by name */
@Override
public Map<String, LoadBalancerFrontend> frontends() {
    return Collections.unmodifiableMap(this.frontends);
}

/** @return an unmodifiable snapshot of the non-public frontends, keyed by name */
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
    Map<String, LoadBalancerPrivateFrontend> privateFrontends = new HashMap<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (!frontend.isPublic()) {
            privateFrontends.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend);
        }
    }
    return Collections.unmodifiableMap(privateFrontends);
}

/** @return an unmodifiable snapshot of the public frontends, keyed by name */
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
    Map<String, LoadBalancerPublicFrontend> publicFrontends = new HashMap<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (frontend.isPublic()) {
            publicFrontends.put(frontend.name(), (LoadBalancerPublicFrontend) frontend);
        }
    }
    return Collections.unmodifiableMap(publicFrontends);
}

/** @return an unmodifiable view of the cached inbound NAT rules, keyed by name */
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
    return Collections.unmodifiableMap(this.inboundNatRules);
}

/** @return an unmodifiable view of the cached HTTP probes, keyed by name */
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
    return Collections.unmodifiableMap(this.httpProbes);
}

/** @return an unmodifiable view of the cached HTTPS probes, keyed by name */
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
    return Collections.unmodifiableMap(this.httpsProbes);
}

/** @return an unmodifiable view of the cached load balancing rules, keyed by name */
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
    return Collections.unmodifiableMap(this.loadBalancingRules);
}
/**
 * Collects the public IP address resource ids referenced by this load balancer's
 * public frontends, preserving the frontends' iteration order.
 */
@Override
public List<String> publicIpAddressIds() {
    List<String> ids = new ArrayList<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (frontend.isPublic()) {
            ids.add(((LoadBalancerPublicFrontend) frontend).publicIpAddressId());
        }
    }
    return Collections.unmodifiableList(ids);
}
/**
 * Finds the public frontend referencing the given public IP address id,
 * or returns null when the id is null or no frontend matches.
 */
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
    if (pipId != null) {
        for (LoadBalancerPublicFrontend candidate : this.publicFrontends().values()) {
            // equalsIgnoreCase(null) is false, so frontends without a PIP id are skipped.
            if (pipId.equalsIgnoreCase(candidate.publicIpAddressId())) {
                return candidate;
            }
        }
    }
    return null;
}
/** Convenience overload: looks up the frontend by the PIP's resource id; null-safe. */
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
    return (publicIPAddress != null) ? this.findFrontendByPublicIpAddress(publicIPAddress.id()) : null;
}

/** Removes the named outbound rule from the cache. */
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
    this.outboundRules.remove(name);
    return this;
}

/** Begins definition of a new outbound rule, or returns the existing one for in-place update. */
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
    LoadBalancerOutboundRule outboundRule = this.outboundRules.get(name);
    if (outboundRule == null) {
        OutboundRuleInner inner = new OutboundRuleInner().withName(name);
        return new LoadBalancerOutboundRuleImpl(inner, this);
    } else {
        return (LoadBalancerOutboundRuleImpl) outboundRule;
    }
}

/** Begins update of an outbound rule; returns null when no rule with that name is cached. */
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
    return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
}
class LoadBalancerImpl
extends GroupableParentResourceWithTagsImpl<LoadBalancer, LoadBalancerInner, LoadBalancerImpl, NetworkManager>
implements LoadBalancer, LoadBalancer.Definition, LoadBalancer.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private final Map<String, String> nicsInBackends = new HashMap<>();
protected final Map<String, String> creatablePIPKeys = new HashMap<>();
private Map<String, LoadBalancerBackend> backends;
private Map<String, LoadBalancerTcpProbe> tcpProbes;
private Map<String, LoadBalancerHttpProbe> httpProbes;
private Map<String, LoadBalancerHttpProbe> httpsProbes;
private Map<String, LoadBalancingRule> loadBalancingRules;
private Map<String, LoadBalancerFrontend> frontends;
private Map<String, LoadBalancerInboundNatRule> inboundNatRules;
private Map<String, LoadBalancerInboundNatPool> inboundNatPools;
private Map<String, LoadBalancerOutboundRule> outboundRules;
LoadBalancerImpl(String name, final LoadBalancerInner innerModel, final NetworkManager networkManager) {
super(name, innerModel, networkManager);
}
@Override
public Mono<LoadBalancer> refreshAsync() {
return super
.refreshAsync()
.map(
loadBalancer -> {
LoadBalancerImpl impl = (LoadBalancerImpl) loadBalancer;
impl.initializeChildrenFromInner();
return impl;
});
}
@Override
protected Mono<LoadBalancerInner> getInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
protected Mono<LoadBalancerInner> applyTagsToInnerAsync() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.updateTagsAsync(resourceGroupName(), name(), innerModel().tags());
}
@Override
protected void initializeChildrenFromInner() {
initializeFrontendsFromInner();
initializeProbesFromInner();
initializeBackendsFromInner();
initializeLoadBalancingRulesFromInner();
initializeInboundNatRulesFromInner();
initializeInboundNatPoolsFromInner();
initializeOutboundRulesFromInner();
}
protected LoadBalancerBackendImpl ensureUniqueBackend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("backend", 20);
LoadBalancerBackendImpl backend = this.defineBackend(name);
backend.attach();
return backend;
}
protected SubResource ensureFrontendRef(String name) {
LoadBalancerFrontendImpl frontend;
if (name == null) {
frontend = this.ensureUniqueFrontend();
} else {
frontend = this.defineFrontend(name);
frontend.attach();
}
return new SubResource().withId(this.futureResourceId() + "/frontendIpConfigurations/" + frontend.name());
}
protected LoadBalancerFrontendImpl ensureUniqueFrontend() {
String name = this.manager().resourceManager().internalContext().randomResourceName("frontend", 20);
LoadBalancerFrontendImpl frontend = this.defineFrontend(name);
frontend.attach();
return frontend;
}
LoadBalancerPrivateFrontend findPrivateFrontendWithSubnet(String networkId, String subnetName) {
if (null == networkId || null == subnetName) {
return null;
} else {
for (LoadBalancerPrivateFrontend frontend : this.privateFrontends().values()) {
if (frontend.networkId() == null || frontend.subnetName() == null) {
continue;
} else if (networkId.equalsIgnoreCase(frontend.networkId())
&& subnetName.equalsIgnoreCase(frontend.subnetName())) {
return frontend;
}
}
return null;
}
}
/**
 * Returns the existing private frontend for the given network/subnet pair,
 * or defines, attaches and returns a new dynamically-allocated one.
 *
 * @param networkId the virtual network id; null yields null
 * @param subnetName the subnet name; null yields null
 */
LoadBalancerPrivateFrontend ensurePrivateFrontendWithSubnet(String networkId, String subnetName) {
    // Fail fast on missing input before scanning the existing frontends
    // (the original looked the frontend up first, wasting the scan).
    if (networkId == null || subnetName == null) {
        return null;
    }
    LoadBalancerPrivateFrontend frontend = this.findPrivateFrontendWithSubnet(networkId, subnetName);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe =
        this.ensureUniqueFrontend().withExistingSubnet(networkId, subnetName).withPrivateIpAddressDynamic();
    fe.attach();
    return fe;
}
/**
 * Returns the existing public frontend referencing the given public IP id,
 * or defines, attaches and returns a new one.
 *
 * @param pipId the public IP address resource id; null yields null
 */
LoadBalancerPublicFrontend ensurePublicFrontendWithPip(String pipId) {
    // Fail fast on missing input before scanning the existing frontends
    // (the original performed the lookup even for a null id).
    if (pipId == null) {
        return null;
    }
    LoadBalancerPublicFrontend frontend = this.findFrontendByPublicIpAddress(pipId);
    if (frontend != null) {
        return frontend;
    }
    LoadBalancerFrontendImpl fe = this.ensureUniqueFrontend().withExistingPublicIpAddress(pipId);
    fe.attach();
    return fe;
}
@Override
protected Mono<Void> afterCreatingAsync() {
if (this.nicsInBackends != null) {
List<Throwable> nicExceptions = new ArrayList<>();
return Flux
.fromIterable(this.nicsInBackends.entrySet())
.flatMap(
nicInBackend -> {
String nicId = nicInBackend.getKey();
String backendName = nicInBackend.getValue();
return this
.manager()
.networkInterfaces()
.getByIdAsync(nicId)
.flatMap(
nic -> {
NicIpConfiguration nicIP = nic.primaryIPConfiguration();
return nic
.update()
.updateIPConfiguration(nicIP.name())
.withExistingLoadBalancerBackend(this, backendName)
.parent()
.applyAsync();
});
})
.onErrorResume(
t -> {
nicExceptions.add(t);
return Mono.empty();
})
.then(
Mono
.defer(
() -> {
if (!nicExceptions.isEmpty()) {
return Mono.error(Exceptions.multiple(nicExceptions));
} else {
this.nicsInBackends.clear();
return Mono.empty();
}
}));
}
return Mono.empty();
}
@Override
protected Mono<LoadBalancerInner> createInner() {
return this
.manager()
.serviceClient()
.getLoadBalancers()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel());
}
@Override
public Mono<LoadBalancer> createResourceAsync() {
beforeCreating();
return createInner()
.flatMap(
inner -> {
setInner(inner);
initializeChildrenFromInner();
return afterCreatingAsync().then(this.refreshAsync());
});
}
private void initializeFrontendsFromInner() {
this.frontends = new TreeMap<>();
List<FrontendIpConfigurationInner> frontendsInner = this.innerModel().frontendIpConfigurations();
if (frontendsInner != null) {
for (FrontendIpConfigurationInner frontendInner : frontendsInner) {
LoadBalancerFrontendImpl frontend = new LoadBalancerFrontendImpl(frontendInner, this);
this.frontends.put(frontendInner.name(), frontend);
}
}
}
private void initializeBackendsFromInner() {
this.backends = new TreeMap<>();
List<BackendAddressPoolInner> backendsInner = this.innerModel().backendAddressPools();
if (backendsInner != null) {
for (BackendAddressPoolInner backendInner : backendsInner) {
LoadBalancerBackendImpl backend = new LoadBalancerBackendImpl(backendInner, this);
this.backends.put(backendInner.name(), backend);
}
}
}
private void initializeProbesFromInner() {
this.httpProbes = new TreeMap<>();
this.httpsProbes = new TreeMap<>();
this.tcpProbes = new TreeMap<>();
if (this.innerModel().probes() != null) {
for (ProbeInner probeInner : this.innerModel().probes()) {
LoadBalancerProbeImpl probe = new LoadBalancerProbeImpl(probeInner, this);
if (probeInner.protocol().equals(ProbeProtocol.TCP)) {
this.tcpProbes.put(probeInner.name(), probe);
} else if (probeInner.protocol().equals(ProbeProtocol.HTTP)) {
this.httpProbes.put(probeInner.name(), probe);
} else if (probeInner.protocol().equals(ProbeProtocol.HTTPS)) {
this.httpsProbes.put(probeInner.name(), probe);
}
}
}
}
private void initializeLoadBalancingRulesFromInner() {
this.loadBalancingRules = new TreeMap<>();
List<LoadBalancingRuleInner> rulesInner = this.innerModel().loadBalancingRules();
if (rulesInner != null) {
for (LoadBalancingRuleInner ruleInner : rulesInner) {
LoadBalancingRuleImpl rule = new LoadBalancingRuleImpl(ruleInner, this);
this.loadBalancingRules.put(ruleInner.name(), rule);
}
}
}
private void initializeInboundNatPoolsFromInner() {
this.inboundNatPools = new TreeMap<>();
List<InboundNatPool> inners = this.innerModel().inboundNatPools();
if (inners != null) {
for (InboundNatPool inner : inners) {
LoadBalancerInboundNatPoolImpl wrapper = new LoadBalancerInboundNatPoolImpl(inner, this);
this.inboundNatPools.put(wrapper.name(), wrapper);
}
}
}
private void initializeInboundNatRulesFromInner() {
this.inboundNatRules = new TreeMap<>();
List<InboundNatRuleInner> rulesInner = this.innerModel().inboundNatRules();
if (rulesInner != null) {
for (InboundNatRuleInner ruleInner : rulesInner) {
LoadBalancerInboundNatRuleImpl rule = new LoadBalancerInboundNatRuleImpl(ruleInner, this);
this.inboundNatRules.put(ruleInner.name(), rule);
}
}
}
private void initializeOutboundRulesFromInner() {
this.outboundRules = new TreeMap<>();
List<OutboundRuleInner> rulesInner = this.innerModel().outboundRules();
if (rulesInner != null) {
for (OutboundRuleInner ruleInner : rulesInner) {
LoadBalancerOutboundRule rule = new LoadBalancerOutboundRuleImpl(ruleInner, this);
this.outboundRules.put(ruleInner.name(), rule);
}
}
}
/**
 * Computes the resource id this load balancer will have once created,
 * usable for building child sub-resource references before the PUT completes.
 */
String futureResourceId() {
    // Plain concatenation; the StringBuilder chain added no value for a fixed 3-part join.
    return super.resourceIdBase() + "/providers/Microsoft.Network/loadBalancers/" + this.name();
}
LoadBalancerImpl withFrontend(LoadBalancerFrontendImpl frontend) {
if (frontend != null) {
this.frontends.put(frontend.name(), frontend);
}
return this;
}
LoadBalancerImpl withProbe(LoadBalancerProbeImpl probe) {
if (probe == null) {
return this;
} else if (probe.protocol() == ProbeProtocol.HTTP) {
httpProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.HTTPS) {
httpsProbes.put(probe.name(), probe);
} else if (probe.protocol() == ProbeProtocol.TCP) {
tcpProbes.put(probe.name(), probe);
}
return this;
}
LoadBalancerImpl withLoadBalancingRule(LoadBalancingRuleImpl loadBalancingRule) {
if (loadBalancingRule != null) {
this.loadBalancingRules.put(loadBalancingRule.name(), loadBalancingRule);
}
return this;
}
LoadBalancerImpl withInboundNatRule(LoadBalancerInboundNatRuleImpl inboundNatRule) {
if (inboundNatRule != null) {
this.inboundNatRules.put(inboundNatRule.name(), inboundNatRule);
}
return this;
}
LoadBalancerImpl withInboundNatPool(LoadBalancerInboundNatPoolImpl inboundNatPool) {
if (inboundNatPool != null) {
this.inboundNatPools.put(inboundNatPool.name(), inboundNatPool);
}
return this;
}
LoadBalancerImpl withOutboundRule(LoadBalancerOutboundRuleImpl outboundRule) {
if (outboundRule != null) {
this.outboundRules.put(outboundRule.name(), outboundRule);
}
return this;
}
LoadBalancerImpl withBackend(LoadBalancerBackendImpl backend) {
if (backend != null) {
this.backends.put(backend.name(), backend);
}
return this;
}
LoadBalancerImpl withNewPublicIPAddress(String dnsLeafLabel, String frontendName) {
PublicIpAddress.DefinitionStages.WithGroup precreatablePIP =
manager().publicIpAddresses().define(dnsLeafLabel).withRegion(this.regionName());
Creatable<PublicIpAddress> creatablePip;
if (super.creatableGroup == null) {
creatablePip =
precreatablePIP.withExistingResourceGroup(this.resourceGroupName()).withLeafDomainLabel(dnsLeafLabel);
} else {
creatablePip = precreatablePIP.withNewResourceGroup(super.creatableGroup).withLeafDomainLabel(dnsLeafLabel);
}
return withNewPublicIPAddress(creatablePip, frontendName);
}
LoadBalancerImpl withNewPublicIPAddress(Creatable<PublicIpAddress> creatablePip, String frontendName) {
String existingPipFrontendName = this.creatablePIPKeys.get(creatablePip.key());
if (frontendName == null) {
if (existingPipFrontendName != null) {
frontendName = existingPipFrontendName;
} else {
frontendName = ensureUniqueFrontend().name();
}
}
if (existingPipFrontendName == null) {
this.creatablePIPKeys.put(this.addDependency(creatablePip), frontendName);
} else if (!existingPipFrontendName.equalsIgnoreCase(frontendName)) {
String exceptionMessage =
"This public IP address definition is already associated with a frontend under a different name.";
throw logger.logExceptionAsError(new IllegalArgumentException(exceptionMessage));
}
return this;
}
protected LoadBalancerImpl withExistingPublicIPAddress(String resourceId, String frontendName) {
if (frontendName == null) {
return ensureUniqueFrontend().withExistingPublicIpAddress(resourceId).parent();
} else {
return this.definePublicFrontend(frontendName).withExistingPublicIpAddress(resourceId).attach();
}
}
LoadBalancerImpl withExistingVirtualMachine(HasNetworkInterfaces vm, String backendName) {
if (backendName != null) {
this.defineBackend(backendName).attach();
if (vm.primaryNetworkInterfaceId() != null) {
this.nicsInBackends.put(vm.primaryNetworkInterfaceId(), backendName.toLowerCase(Locale.ROOT));
}
}
return this;
}
@Override
public LoadBalancerProbeImpl defineTcpProbe(String name) {
LoadBalancerProbe probe = this.tcpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.TCP);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpProbe(String name) {
LoadBalancerProbe probe = this.httpProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTP).withPort(80);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancerProbeImpl defineHttpsProbe(String name) {
LoadBalancerProbe probe = this.httpsProbes.get(name);
if (probe == null) {
ProbeInner inner = new ProbeInner().withName(name).withProtocol(ProbeProtocol.HTTPS).withPort(443);
return new LoadBalancerProbeImpl(inner, this);
} else {
return (LoadBalancerProbeImpl) probe;
}
}
@Override
public LoadBalancingRuleImpl defineLoadBalancingRule(String name) {
LoadBalancingRule lbRule = this.loadBalancingRules.get(name);
if (lbRule == null) {
LoadBalancingRuleInner inner = new LoadBalancingRuleInner().withName(name);
return new LoadBalancingRuleImpl(inner, this);
} else {
return (LoadBalancingRuleImpl) lbRule;
}
}
@Override
public LoadBalancerInboundNatRuleImpl defineInboundNatRule(String name) {
LoadBalancerInboundNatRule natRule = this.inboundNatRules.get(name);
if (natRule == null) {
InboundNatRuleInner inner = new InboundNatRuleInner().withName(name);
return new LoadBalancerInboundNatRuleImpl(inner, this);
} else {
return (LoadBalancerInboundNatRuleImpl) natRule;
}
}
@Override
public LoadBalancerInboundNatPoolImpl defineInboundNatPool(String name) {
    // Reuse an existing inbound NAT pool definition with this name, otherwise begin a blank one.
    final LoadBalancerInboundNatPool existing = this.inboundNatPools.get(name);
    if (existing != null) {
        return (LoadBalancerInboundNatPoolImpl) existing;
    }
    return new LoadBalancerInboundNatPoolImpl(new InboundNatPool().withName(name), this);
}
// Private and public frontends share the same underlying definition object; whether a
// frontend is public is decided by later configuration, not by which method created it.
@Override
public LoadBalancerFrontendImpl definePrivateFrontend(String name) {
return defineFrontend(name);
}
@Override
public LoadBalancerFrontendImpl definePublicFrontend(String name) {
return defineFrontend(name);
}
// Shared by definePublicFrontend/definePrivateFrontend: reuse or start a frontend definition.
LoadBalancerFrontendImpl defineFrontend(String name) {
    final LoadBalancerFrontend existing = this.frontends.get(name);
    if (existing != null) {
        return (LoadBalancerFrontendImpl) existing;
    }
    return new LoadBalancerFrontendImpl(new FrontendIpConfigurationInner().withName(name), this);
}
@Override
public LoadBalancerBackendImpl defineBackend(String name) {
    // Reuse an existing backend definition with this name, otherwise begin a blank one.
    final LoadBalancerBackend existing = this.backends.get(name);
    if (existing != null) {
        return (LoadBalancerBackendImpl) existing;
    }
    return new LoadBalancerBackendImpl(new BackendAddressPoolInner().withName(name), this);
}
// Propagates the chosen SKU type onto the inner resource model.
@Override
public LoadBalancerImpl withSku(LoadBalancerSkuType skuType) {
this.innerModel().withSku(skuType.sku());
return this;
}
/**
 * Removes the probe with the given name from whichever probe collection holds it
 * (HTTP, then HTTPS, then TCP); a no-op when no probe by that name exists.
 */
@Override
public LoadBalancerImpl withoutProbe(String name) {
    // Idiom fix: Map.remove returns null when the key was absent, so a single lookup per map
    // replaces the original containsKey+remove pair while preserving the same first-match order.
    if (this.httpProbes.remove(name) == null && this.httpsProbes.remove(name) == null) {
        this.tcpProbes.remove(name);
    }
    return this;
}
// The update*() methods below return the existing child-resource wrapper for in-place
// modification; they return null when no child with the given name exists (no defensive check).
@Override
public LoadBalancerProbeImpl updateTcpProbe(String name) {
return (LoadBalancerProbeImpl) this.tcpProbes.get(name);
}
@Override
public LoadBalancerBackendImpl updateBackend(String name) {
return (LoadBalancerBackendImpl) this.backends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePublicFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerFrontendImpl updatePrivateFrontend(String name) {
return (LoadBalancerFrontendImpl) this.frontends.get(name);
}
@Override
public LoadBalancerInboundNatRuleImpl updateInboundNatRule(String name) {
return (LoadBalancerInboundNatRuleImpl) this.inboundNatRules.get(name);
}
@Override
public LoadBalancerInboundNatPoolImpl updateInboundNatPool(String name) {
return (LoadBalancerInboundNatPoolImpl) this.inboundNatPools.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpProbe(String name) {
return (LoadBalancerProbeImpl) this.httpProbes.get(name);
}
@Override
public LoadBalancerProbeImpl updateHttpsProbe(String name) {
return (LoadBalancerProbeImpl) this.httpsProbes.get(name);
}
@Override
public LoadBalancingRuleImpl updateLoadBalancingRule(String name) {
return (LoadBalancingRuleImpl) this.loadBalancingRules.get(name);
}
// The without*() methods below remove a child resource by name; removing a missing
// name is a harmless no-op since Map.remove tolerates absent keys.
@Override
public LoadBalancerImpl withoutLoadBalancingRule(String name) {
this.loadBalancingRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutInboundNatRule(String name) {
this.inboundNatRules.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutBackend(String name) {
this.backends.remove(name);
return this;
}
@Override
public Update withoutInboundNatPool(String name) {
this.inboundNatPools.remove(name);
return this;
}
@Override
public LoadBalancerImpl withoutFrontend(String name) {
this.frontends.remove(name);
return this;
}
// Read-only views over the child-resource maps; callers cannot mutate them.
@Override
public Map<String, LoadBalancerBackend> backends() {
return Collections.unmodifiableMap(this.backends);
}
@Override
public Map<String, LoadBalancerInboundNatPool> inboundNatPools() {
return Collections.unmodifiableMap(this.inboundNatPools);
}
// Translates the inner SKU into the fluent SKU type.
@Override
public LoadBalancerSkuType sku() {
return LoadBalancerSkuType.fromSku(this.innerModel().sku());
}
@Override
public Map<String, LoadBalancerOutboundRule> outboundRules() {
return Collections.unmodifiableMap(this.outboundRules);
}
@Override
public Map<String, LoadBalancerTcpProbe> tcpProbes() {
return Collections.unmodifiableMap(this.tcpProbes);
}
@Override
public Map<String, LoadBalancerFrontend> frontends() {
return Collections.unmodifiableMap(this.frontends);
}
@Override
public Map<String, LoadBalancerPrivateFrontend> privateFrontends() {
    // Filter the full frontend map down to the internal (non-public) frontends, keyed by name.
    final Map<String, LoadBalancerPrivateFrontend> result = new HashMap<>();
    this.frontends().values().stream()
        .filter(frontend -> !frontend.isPublic())
        .forEach(frontend -> result.put(frontend.name(), (LoadBalancerPrivateFrontend) frontend));
    return Collections.unmodifiableMap(result);
}
@Override
public Map<String, LoadBalancerPublicFrontend> publicFrontends() {
    // Filter the full frontend map down to the public-facing frontends, keyed by name.
    final Map<String, LoadBalancerPublicFrontend> result = new HashMap<>();
    this.frontends().values().stream()
        .filter(LoadBalancerFrontend::isPublic)
        .forEach(frontend -> result.put(frontend.name(), (LoadBalancerPublicFrontend) frontend));
    return Collections.unmodifiableMap(result);
}
// Read-only views over the remaining child-resource maps.
@Override
public Map<String, LoadBalancerInboundNatRule> inboundNatRules() {
return Collections.unmodifiableMap(this.inboundNatRules);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpProbes() {
return Collections.unmodifiableMap(this.httpProbes);
}
@Override
public Map<String, LoadBalancerHttpProbe> httpsProbes() {
return Collections.unmodifiableMap(this.httpsProbes);
}
@Override
public Map<String, LoadBalancingRule> loadBalancingRules() {
return Collections.unmodifiableMap(this.loadBalancingRules);
}
@Override
public List<String> publicIpAddressIds() {
    // Collect the public IP address id of every public frontend (an id may be null).
    final List<String> ids = new ArrayList<>();
    for (LoadBalancerFrontend frontend : this.frontends().values()) {
        if (!frontend.isPublic()) {
            continue;
        }
        ids.add(((LoadBalancerPublicFrontend) frontend).publicIpAddressId());
    }
    return Collections.unmodifiableList(ids);
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(String pipId) {
    if (pipId == null) {
        return null;
    }
    // Case-insensitive match against each public frontend's associated public IP id.
    for (LoadBalancerPublicFrontend frontend : this.publicFrontends().values()) {
        final String frontendPipId = frontend.publicIpAddressId();
        if (frontendPipId != null && pipId.equalsIgnoreCase(frontendPipId)) {
            return frontend;
        }
    }
    return null;
}
@Override
public LoadBalancerPublicFrontend findFrontendByPublicIpAddress(PublicIpAddress publicIPAddress) {
    // Null-safe delegation to the id-based lookup.
    if (publicIPAddress == null) {
        return null;
    }
    return this.findFrontendByPublicIpAddress(publicIPAddress.id());
}
// Removes the named outbound rule; a no-op when absent.
@Override
public LoadBalancerImpl withoutOutboundRule(String name) {
this.outboundRules.remove(name);
return this;
}
@Override
public LoadBalancerOutboundRuleImpl defineOutboundRule(String name) {
    // Reuse an existing outbound rule definition with this name, otherwise begin a blank one.
    final LoadBalancerOutboundRule existing = this.outboundRules.get(name);
    if (existing != null) {
        return (LoadBalancerOutboundRuleImpl) existing;
    }
    return new LoadBalancerOutboundRuleImpl(new OutboundRuleInner().withName(name), this);
}
// Returns the existing outbound rule wrapper for in-place update; null when not present.
@Override
public LoadBalancerOutboundRuleImpl updateOutboundRule(String name) {
return (LoadBalancerOutboundRuleImpl) this.outboundRules.get(name);
}
} |
Does the function name end with `opt-out`? | private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
// Builds the service task manifest from the user-supplied actions: each non-null action list
// is mapped to its corresponding service task type; a null action list maps to null (task
// family not requested), and a null element inside a list maps through as a null task.
return new JobManifestTasks()
// Entity recognition actions -> EntitiesTask.
.setEntityRecognitionTasks(actions.getRecognizeEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntitiesTask entitiesTask = new EntitiesTask();
entitiesTask.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT));
return entitiesTask;
}).collect(Collectors.toList()))
// PII entity recognition actions -> PiiTask (carries optional domain and category filters).
.setEntityRecognitionPiiTasks(actions.getRecognizePiiEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizePiiEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final PiiTask piiTask = new PiiTask();
piiTask.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))
);
return piiTask;
}).collect(Collectors.toList()))
// Key phrase extraction actions -> KeyPhrasesTask.
.setKeyPhraseExtractionTasks(actions.getExtractKeyPhrasesActions() == null ? null
: StreamSupport.stream(actions.getExtractKeyPhrasesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final KeyPhrasesTask keyPhrasesTask = new KeyPhrasesTask();
keyPhrasesTask.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
);
return keyPhrasesTask;
}).collect(Collectors.toList()))
// Linked entity recognition actions -> EntityLinkingTask.
.setEntityLinkingTasks(actions.getRecognizeLinkedEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeLinkedEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntityLinkingTask entityLinkingTask = new EntityLinkingTask();
entityLinkingTask.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return entityLinkingTask;
}).collect(Collectors.toList()))
// Sentiment analysis actions -> SentimentAnalysisTask.
.setSentimentAnalysisTasks(actions.getAnalyzeSentimentActions() == null ? null
: StreamSupport.stream(actions.getAnalyzeSentimentActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final SentimentAnalysisTask sentimentAnalysisTask = new SentimentAnalysisTask();
sentimentAnalysisTask.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return sentimentAnalysisTask;
}).collect(Collectors.toList()))
// Extractive summarization actions -> ExtractiveSummarizationTask (sentence count/order).
.setExtractiveSummarizationTasks(actions.getExtractSummaryActions() == null ? null
: StreamSupport.stream(actions.getExtractSummaryActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final ExtractiveSummarizationTask extractiveSummarizationTask =
new ExtractiveSummarizationTask();
extractiveSummarizationTask.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getSentencesOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getSentencesOrderBy().toString()))
);
return extractiveSummarizationTask;
}).collect(Collectors.toList()));
} | .setLoggingOptOut(action.isServiceLogsDisabled()) | private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
return new JobManifestTasks()
.setEntityRecognitionTasks(actions.getRecognizeEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntitiesTask entitiesTask = new EntitiesTask();
entitiesTask.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT));
return entitiesTask;
}).collect(Collectors.toList()))
.setEntityRecognitionPiiTasks(actions.getRecognizePiiEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizePiiEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final PiiTask piiTask = new PiiTask();
piiTask.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))
);
return piiTask;
}).collect(Collectors.toList()))
.setKeyPhraseExtractionTasks(actions.getExtractKeyPhrasesActions() == null ? null
: StreamSupport.stream(actions.getExtractKeyPhrasesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final KeyPhrasesTask keyPhrasesTask = new KeyPhrasesTask();
keyPhrasesTask.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
);
return keyPhrasesTask;
}).collect(Collectors.toList()))
.setEntityLinkingTasks(actions.getRecognizeLinkedEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeLinkedEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntityLinkingTask entityLinkingTask = new EntityLinkingTask();
entityLinkingTask.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return entityLinkingTask;
}).collect(Collectors.toList()))
.setSentimentAnalysisTasks(actions.getAnalyzeSentimentActions() == null ? null
: StreamSupport.stream(actions.getAnalyzeSentimentActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final SentimentAnalysisTask sentimentAnalysisTask = new SentimentAnalysisTask();
sentimentAnalysisTask.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return sentimentAnalysisTask;
}).collect(Collectors.toList()))
.setExtractiveSummarizationTasks(actions.getExtractSummaryActions() == null ? null
: StreamSupport.stream(actions.getExtractSummaryActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final ExtractiveSummarizationTask extractiveSummarizationTask =
new ExtractiveSummarizationTask();
extractiveSummarizationTask.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getSentencesOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getSentencesOrderBy().toString()))
);
return extractiveSummarizationTask;
}).collect(Collectors.toList()));
} | class AnalyzeActionsAsyncClient {
// Task-family names used in service error targets to identify which action an error refers to.
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
// NOTE(review): the format-string literal below appears truncated in this copy of the file
// (the text after String.format(" is cut off, and only four of the six task-name arguments
// survive) -- recover the original regex from source control before relying on it.
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
// Compiled once; used by parseActionErrorTarget to extract the task name and index from a target.
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
// Starts the analyze-actions long-running operation: submits the documents plus the task
// manifest, polls the operation status, and finally exposes the results as a PagedFlux.
// Validation/mapping failures surface as a failed PollerFlux rather than a synchronous throw.
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
// Tag the context so telemetry attributes these calls to the Cognitive Services namespace.
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
// Activation: submit the job and capture the operation id from the Operation-Location header.
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
// The service offers no cancellation for this operation.
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
// Iterable-returning twin of beginAnalyzeActions: identical submit/poll flow, but the final
// fetch wraps the paged flux in an AnalyzeActionsResultPagedIterable for synchronous consumption.
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
// Wraps the pre-built activation Mono as the PollerFlux activation function, mapping service
// failures to HttpResponseException where applicable.
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
// Builds the polling function: reads the operation id from the latest poll response, queries
// the job status via pollingFunction, and folds the result into a fresh PollResponse.
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
// Builds the final-result function: resolves the operation id from the last poll response and
// delegates to fetchingFunction for the paged result flux.
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
// Iterable counterpart of fetchingOperation; same operation-id resolution, iterable result.
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
// Creates a paged flux whose page retriever lazily pulls result pages via getPage; the
// continuation token supplied by the flux carries paging state between calls.
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches one page of analyze-actions results. When a continuation token (the service's
 * nextLink) is present, the paging state encoded in it overrides the caller-supplied
 * top/skip/showStats values.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    Integer topValue = top;
    Integer skipValue = skip;
    boolean showStatsValue = showStats;
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: the original looked this up with the boolean variable 'showStats' as the map
        // key (auto-boxed Boolean), which can never equal a String key, so the encoded flag was
        // always dropped and defaulted to false. Use the "showStats" key instead.
        // NOTE(review): confirm "showStats" matches the key emitted by parseNextLink.
        showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", false);
    }
    // Single call site replaces the original duplicated if/else branches.
    return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
        .map(this::toAnalyzeActionsResultPagedResponse)
        .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
// Wraps a job-state response into a one-element result page, carrying the service's nextLink
// forward as the continuation token.
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
// Flattens the service's AnalyzeJobState into a single AnalyzeActionsResult: one action-result
// list per task family, then folds job-level errors back onto the action each error targets.
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
tasksStateTasks.getEntityRecognitionPiiTasks();
final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
tasksStateTasks.getEntityRecognitionTasks();
final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
tasksStateTasks.getKeyPhraseExtractionTasks();
final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
tasksStateTasks.getEntityLinkingTasks();
final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
tasksStateTasks.getSentimentAnalysisTasks();
final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
tasksStateTasks.getExtractiveSummarizationTasks();
List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
// Entity recognition results; a null results payload means the task has no output yet.
if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
final EntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeEntitiesResultCollectionResponse(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeEntitiesActionResults.add(actionResult);
}
}
// PII entity recognition results.
if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
for (int i = 0; i < piiTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
final PiiResult results = taskItem.getResults();
if (results != null) {
RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizePiiEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizePiiEntitiesActionResults.add(actionResult);
}
}
// Key phrase extraction results.
if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
final KeyPhraseResult results = taskItem.getResults();
if (results != null) {
ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractKeyPhrasesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractKeyPhrasesActionResults.add(actionResult);
}
}
// Linked entity recognition results.
if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
final EntityLinkingResult results = taskItem.getResults();
if (results != null) {
RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeLinkedEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeLinkedEntitiesActionResults.add(actionResult);
}
}
// Sentiment analysis results.
if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
final SentimentResponse results = taskItem.getResults();
if (results != null) {
AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
toAnalyzeSentimentResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
analyzeSentimentActionResults.add(actionResult);
}
}
// Extractive summarization results.
if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
extractiveSummarizationTasksItems.get(i);
final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
final ExtractiveSummarizationResult results = taskItem.getResults();
if (results != null) {
ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractSummaryResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractSummaryActionResults.add(actionResult);
}
}
// Attribute each job-level error to the action its target reference identifies.
final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
if (!CoreUtils.isNullOrEmpty(errors)) {
for (TextAnalyticsError error : errors) {
final String[] targetPair = parseActionErrorTarget(error.getTarget());
final String taskName = targetPair[0];
// NOTE(review): targetPair entries may be null when the target regex did not match, in
// which case Integer.valueOf throws NPE -- confirm parseActionErrorTarget guarantees a match.
final Integer taskIndex = Integer.valueOf(targetPair[1]);
final TextAnalyticsActionResult actionResult;
if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeEntitiesActionResults.get(taskIndex);
} else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
} else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
actionResult = extractKeyPhrasesActionResults.get(taskIndex);
} else if (ENTITY_LINKING_TASKS.equals(taskName)) {
actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
} else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
actionResult = analyzeSentimentActionResults.get(taskIndex);
} else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
actionResult = extractSummaryActionResults.get(taskIndex);
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Invalid task name in target reference, " + taskName));
}
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
new com.azure.ai.textanalytics.models.TextAnalyticsError(
TextAnalyticsErrorCode.fromString(
error.getCode() == null ? null : error.getCode().toString()),
error.getMessage(), null));
}
}
// Assemble the final result from the per-family action-result lists.
final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizePiiEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
IterableStream.of(extractKeyPhrasesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeLinkedEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
IterableStream.of(analyzeSentimentActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
IterableStream.of(extractSummaryActionResults));
return analyzeActionsResult;
}
/**
 * Converts a job-status response into the next LRO poll response: maps the service job status
 * onto a LongRunningOperationStatus and copies job metadata/counters onto the operation detail.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    // Default to completed; unknown statuses pass through via fromString below.
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    final AnalyzeJobState jobState = analyzeJobStateResponse.getValue();
    if (jobState != null && jobState.getStatus() != null) {
        switch (jobState.getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                status = LongRunningOperationStatus.fromString(jobState.getStatus().toString(), true);
                break;
        }
    }
    final AnalyzeActionsOperationDetail operationDetail = operationResultPollResponse.getValue();
    // BUG FIX: the original dereferenced getValue() unconditionally below its own null check,
    // throwing NullPointerException on an empty poll body; skip metadata propagation instead.
    if (jobState != null) {
        AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationDetail,
            jobState.getDisplayName());
        AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationDetail,
            jobState.getCreatedDateTime());
        AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationDetail,
            jobState.getExpirationDateTime());
        AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationDetail,
            jobState.getLastUpdateDateTime());
        final TasksStateTasks tasksResult = jobState.getTasks();
        if (tasksResult != null) {
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationDetail,
                tasksResult.getFailed());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationDetail,
                tasksResult.getInProgress());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(operationDetail,
                tasksResult.getCompleted());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationDetail,
                tasksResult.getTotal());
        }
    }
    return Mono.just(new PollResponse<>(status, operationDetail));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
private String[] parseActionErrorTarget(String targetReference) {
if (CoreUtils.isNullOrEmpty(targetReference)) {
throw logger.logExceptionAsError(new RuntimeException(
"Expected an error with a target field referencing an action but did not get one"));
}
final Matcher matcher = PATTERN.matcher(targetReference);
String[] taskNameIdPair = new String[2];
while (matcher.find()) {
taskNameIdPair[0] = matcher.group(1);
taskNameIdPair[1] = matcher.group(2);
}
return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
Integer skip, boolean showStats, Context context) {
if (continuationToken != null) {
final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault(showStats, false);
return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} else {
return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
}
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
final AnalyzeJobState analyzeJobState = response.getValue();
return new PagedResponseBase<Void, AnalyzeActionsResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
Arrays.asList(toAnalyzeActionsResult(analyzeJobState)),
analyzeJobState.getNextLink(),
null);
}
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
tasksStateTasks.getEntityRecognitionPiiTasks();
final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
tasksStateTasks.getEntityRecognitionTasks();
final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
tasksStateTasks.getKeyPhraseExtractionTasks();
final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
tasksStateTasks.getEntityLinkingTasks();
final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
tasksStateTasks.getSentimentAnalysisTasks();
final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
tasksStateTasks.getExtractiveSummarizationTasks();
List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
final EntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeEntitiesResultCollectionResponse(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
for (int i = 0; i < piiTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
final PiiResult results = taskItem.getResults();
if (results != null) {
RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizePiiEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizePiiEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
final KeyPhraseResult results = taskItem.getResults();
if (results != null) {
ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractKeyPhrasesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractKeyPhrasesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
final EntityLinkingResult results = taskItem.getResults();
if (results != null) {
RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeLinkedEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeLinkedEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
final SentimentResponse results = taskItem.getResults();
if (results != null) {
AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
toAnalyzeSentimentResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
analyzeSentimentActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
extractiveSummarizationTasksItems.get(i);
final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
final ExtractiveSummarizationResult results = taskItem.getResults();
if (results != null) {
ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractSummaryResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractSummaryActionResults.add(actionResult);
}
}
final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
if (!CoreUtils.isNullOrEmpty(errors)) {
for (TextAnalyticsError error : errors) {
final String[] targetPair = parseActionErrorTarget(error.getTarget());
final String taskName = targetPair[0];
final Integer taskIndex = Integer.valueOf(targetPair[1]);
final TextAnalyticsActionResult actionResult;
if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeEntitiesActionResults.get(taskIndex);
} else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
} else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
actionResult = extractKeyPhrasesActionResults.get(taskIndex);
} else if (ENTITY_LINKING_TASKS.equals(taskName)) {
actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
} else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
actionResult = analyzeSentimentActionResults.get(taskIndex);
} else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
actionResult = extractSummaryActionResults.get(taskIndex);
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Invalid task name in target reference, " + taskName));
}
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
new com.azure.ai.textanalytics.models.TextAnalyticsError(
TextAnalyticsErrorCode.fromString(
error.getCode() == null ? null : error.getCode().toString()),
error.getMessage(), null));
}
}
final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizePiiEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
IterableStream.of(extractKeyPhrasesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeLinkedEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
IterableStream.of(analyzeSentimentActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
IterableStream.of(extractSummaryActionResults));
return analyzeActionsResult;
}
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
Response<AnalyzeJobState> analyzeJobStateResponse,
PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
switch (analyzeJobStateResponse.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case CANCELLED:
status = LongRunningOperationStatus.USER_CANCELLED;
break;
default:
status = LongRunningOperationStatus.fromString(
analyzeJobStateResponse.getValue().getStatus().toString(), true);
break;
}
}
AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getDisplayName());
AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getCreatedDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getExpirationDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getLastUpdateDateTime());
final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
tasksResult.getFailed());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
tasksResult.getInProgress());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
operationResultPollResponse.getValue(), tasksResult.getCompleted());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
tasksResult.getTotal());
return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
private String[] parseActionErrorTarget(String targetReference) {
if (CoreUtils.isNullOrEmpty(targetReference)) {
throw logger.logExceptionAsError(new RuntimeException(
"Expected an error with a target field referencing an action but did not get one"));
}
final Matcher matcher = PATTERN.matcher(targetReference);
String[] taskNameIdPair = new String[2];
while (matcher.find()) {
taskNameIdPair[0] = matcher.group(1);
taskNameIdPair[1] = matcher.group(2);
}
return taskNameIdPair;
}
} |
nvm, just saw the referenced PR. | private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
return new JobManifestTasks()
.setEntityRecognitionTasks(actions.getRecognizeEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntitiesTask entitiesTask = new EntitiesTask();
entitiesTask.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT));
return entitiesTask;
}).collect(Collectors.toList()))
.setEntityRecognitionPiiTasks(actions.getRecognizePiiEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizePiiEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final PiiTask piiTask = new PiiTask();
piiTask.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))
);
return piiTask;
}).collect(Collectors.toList()))
.setKeyPhraseExtractionTasks(actions.getExtractKeyPhrasesActions() == null ? null
: StreamSupport.stream(actions.getExtractKeyPhrasesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final KeyPhrasesTask keyPhrasesTask = new KeyPhrasesTask();
keyPhrasesTask.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
);
return keyPhrasesTask;
}).collect(Collectors.toList()))
.setEntityLinkingTasks(actions.getRecognizeLinkedEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeLinkedEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntityLinkingTask entityLinkingTask = new EntityLinkingTask();
entityLinkingTask.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return entityLinkingTask;
}).collect(Collectors.toList()))
.setSentimentAnalysisTasks(actions.getAnalyzeSentimentActions() == null ? null
: StreamSupport.stream(actions.getAnalyzeSentimentActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final SentimentAnalysisTask sentimentAnalysisTask = new SentimentAnalysisTask();
sentimentAnalysisTask.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return sentimentAnalysisTask;
}).collect(Collectors.toList()))
.setExtractiveSummarizationTasks(actions.getExtractSummaryActions() == null ? null
: StreamSupport.stream(actions.getExtractSummaryActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final ExtractiveSummarizationTask extractiveSummarizationTask =
new ExtractiveSummarizationTask();
extractiveSummarizationTask.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getSentencesOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getSentencesOrderBy().toString()))
);
return extractiveSummarizationTask;
}).collect(Collectors.toList()));
} | .setLoggingOptOut(action.isServiceLogsDisabled()) | private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
return new JobManifestTasks()
.setEntityRecognitionTasks(actions.getRecognizeEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntitiesTask entitiesTask = new EntitiesTask();
entitiesTask.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT));
return entitiesTask;
}).collect(Collectors.toList()))
.setEntityRecognitionPiiTasks(actions.getRecognizePiiEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizePiiEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final PiiTask piiTask = new PiiTask();
piiTask.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))
);
return piiTask;
}).collect(Collectors.toList()))
.setKeyPhraseExtractionTasks(actions.getExtractKeyPhrasesActions() == null ? null
: StreamSupport.stream(actions.getExtractKeyPhrasesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final KeyPhrasesTask keyPhrasesTask = new KeyPhrasesTask();
keyPhrasesTask.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
);
return keyPhrasesTask;
}).collect(Collectors.toList()))
.setEntityLinkingTasks(actions.getRecognizeLinkedEntitiesActions() == null ? null
: StreamSupport.stream(actions.getRecognizeLinkedEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final EntityLinkingTask entityLinkingTask = new EntityLinkingTask();
entityLinkingTask.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return entityLinkingTask;
}).collect(Collectors.toList()))
.setSentimentAnalysisTasks(actions.getAnalyzeSentimentActions() == null ? null
: StreamSupport.stream(actions.getAnalyzeSentimentActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final SentimentAnalysisTask sentimentAnalysisTask = new SentimentAnalysisTask();
sentimentAnalysisTask.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
);
return sentimentAnalysisTask;
}).collect(Collectors.toList()))
.setExtractiveSummarizationTasks(actions.getExtractSummaryActions() == null ? null
: StreamSupport.stream(actions.getExtractSummaryActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final ExtractiveSummarizationTask extractiveSummarizationTask =
new ExtractiveSummarizationTask();
extractiveSummarizationTask.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getSentencesOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getSentencesOrderBy().toString()))
);
return extractiveSummarizationTask;
}).collect(Collectors.toList()));
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
Integer skip, boolean showStats, Context context) {
if (continuationToken != null) {
final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault(showStats, false);
return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} else {
return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
}
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
final AnalyzeJobState analyzeJobState = response.getValue();
return new PagedResponseBase<Void, AnalyzeActionsResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
Arrays.asList(toAnalyzeActionsResult(analyzeJobState)),
analyzeJobState.getNextLink(),
null);
}
    /**
     * Maps the service's {@code AnalyzeJobState} onto the public {@code AnalyzeActionsResult} model:
     * one action-result list per task kind, plus per-action error attribution parsed from the
     * job-level error targets.
     *
     * @param analyzeJobState the deserialized job state; expected to contain a tasks section.
     * @return the aggregated {@code AnalyzeActionsResult}.
     */
    private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
        TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
        final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
            tasksStateTasks.getEntityRecognitionPiiTasks();
        final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
            tasksStateTasks.getEntityRecognitionTasks();
        final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
            tasksStateTasks.getKeyPhraseExtractionTasks();
        final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
            tasksStateTasks.getEntityLinkingTasks();
        final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
            tasksStateTasks.getSentimentAnalysisTasks();
        final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
            tasksStateTasks.getExtractiveSummarizationTasks();
        List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
        List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
        List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
        List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
        List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
        List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
        // Each block below follows the same pattern: for every task item, build the action result,
        // attach the documents results when the task produced any (a task still in progress or
        // failed has a null results payload), and record the completion timestamp.
        if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
            for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
                final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
                final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
                final EntitiesResult results = taskItem.getResults();
                if (results != null) {
                    RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizeEntitiesResultCollectionResponse(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizeEntitiesActionResults.add(actionResult);
            }
        }
        if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
            for (int i = 0; i < piiTasksItems.size(); i++) {
                final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
                final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
                final PiiResult results = taskItem.getResults();
                if (results != null) {
                    RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizePiiEntitiesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizePiiEntitiesActionResults.add(actionResult);
            }
        }
        if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
            for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
                final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
                final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
                final KeyPhraseResult results = taskItem.getResults();
                if (results != null) {
                    ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toExtractKeyPhrasesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                extractKeyPhrasesActionResults.add(actionResult);
            }
        }
        if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
            for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
                final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
                final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
                final EntityLinkingResult results = taskItem.getResults();
                if (results != null) {
                    RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizeLinkedEntitiesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizeLinkedEntitiesActionResults.add(actionResult);
            }
        }
        if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
            for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
                final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
                final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
                final SentimentResponse results = taskItem.getResults();
                if (results != null) {
                    AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toAnalyzeSentimentResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                analyzeSentimentActionResults.add(actionResult);
            }
        }
        if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
            for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
                final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                    extractiveSummarizationTasksItems.get(i);
                final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
                final ExtractiveSummarizationResult results = taskItem.getResults();
                if (results != null) {
                    ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toExtractSummaryResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                extractSummaryActionResults.add(actionResult);
            }
        }
        // Job-level errors carry a target reference (task kind + index); route each error onto the
        // matching action result and flag it as failed.
        final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
        if (!CoreUtils.isNullOrEmpty(errors)) {
            for (TextAnalyticsError error : errors) {
                final String[] targetPair = parseActionErrorTarget(error.getTarget());
                final String taskName = targetPair[0];
                final Integer taskIndex = Integer.valueOf(targetPair[1]);
                final TextAnalyticsActionResult actionResult;
                if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                    actionResult = recognizeEntitiesActionResults.get(taskIndex);
                } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                    actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
                } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                    actionResult = extractKeyPhrasesActionResults.get(taskIndex);
                } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                    actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
                } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                    actionResult = analyzeSentimentActionResults.get(taskIndex);
                } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                    actionResult = extractSummaryActionResults.get(taskIndex);
                } else {
                    // Unknown task kind in the target reference indicates a contract drift with the service.
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Invalid task name in target reference, " + taskName));
                }
                TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
                TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                    new com.azure.ai.textanalytics.models.TextAnalyticsError(
                        TextAnalyticsErrorCode.fromString(
                            error.getCode() == null ? null : error.getCode().toString()),
                        error.getMessage(), null));
            }
        }
        // Assemble the aggregate result from the per-kind action lists.
        final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
        AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizeEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizePiiEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
            IterableStream.of(extractKeyPhrasesActionResults));
        AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizeLinkedEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
            IterableStream.of(analyzeSentimentActionResults));
        AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
            IterableStream.of(extractSummaryActionResults));
        return analyzeActionsResult;
    }
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
Response<AnalyzeJobState> analyzeJobStateResponse,
PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
switch (analyzeJobStateResponse.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case CANCELLED:
status = LongRunningOperationStatus.USER_CANCELLED;
break;
default:
status = LongRunningOperationStatus.fromString(
analyzeJobStateResponse.getValue().getStatus().toString(), true);
break;
}
}
AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getDisplayName());
AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getCreatedDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getExpirationDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getLastUpdateDateTime());
final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
tasksResult.getFailed());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
tasksResult.getInProgress());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
operationResultPollResponse.getValue(), tasksResult.getCompleted());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
tasksResult.getTotal());
return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
private String[] parseActionErrorTarget(String targetReference) {
if (CoreUtils.isNullOrEmpty(targetReference)) {
throw logger.logExceptionAsError(new RuntimeException(
"Expected an error with a target field referencing an action but did not get one"));
}
final Matcher matcher = PATTERN.matcher(targetReference);
String[] taskNameIdPair = new String[2];
while (matcher.find()) {
taskNameIdPair[0] = matcher.group(1);
taskNameIdPair[1] = matcher.group(2);
}
return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
Integer skip, boolean showStats, Context context) {
if (continuationToken != null) {
final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault(showStats, false);
return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} else {
return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
}
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
final AnalyzeJobState analyzeJobState = response.getValue();
return new PagedResponseBase<Void, AnalyzeActionsResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
Arrays.asList(toAnalyzeActionsResult(analyzeJobState)),
analyzeJobState.getNextLink(),
null);
}
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
tasksStateTasks.getEntityRecognitionPiiTasks();
final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
tasksStateTasks.getEntityRecognitionTasks();
final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
tasksStateTasks.getKeyPhraseExtractionTasks();
final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
tasksStateTasks.getEntityLinkingTasks();
final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
tasksStateTasks.getSentimentAnalysisTasks();
final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
tasksStateTasks.getExtractiveSummarizationTasks();
List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
final EntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeEntitiesResultCollectionResponse(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
for (int i = 0; i < piiTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
final PiiResult results = taskItem.getResults();
if (results != null) {
RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizePiiEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizePiiEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
final KeyPhraseResult results = taskItem.getResults();
if (results != null) {
ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractKeyPhrasesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractKeyPhrasesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
final EntityLinkingResult results = taskItem.getResults();
if (results != null) {
RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeLinkedEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeLinkedEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
final SentimentResponse results = taskItem.getResults();
if (results != null) {
AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
toAnalyzeSentimentResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
analyzeSentimentActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
extractiveSummarizationTasksItems.get(i);
final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
final ExtractiveSummarizationResult results = taskItem.getResults();
if (results != null) {
ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractSummaryResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractSummaryActionResults.add(actionResult);
}
}
final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
if (!CoreUtils.isNullOrEmpty(errors)) {
for (TextAnalyticsError error : errors) {
final String[] targetPair = parseActionErrorTarget(error.getTarget());
final String taskName = targetPair[0];
final Integer taskIndex = Integer.valueOf(targetPair[1]);
final TextAnalyticsActionResult actionResult;
if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeEntitiesActionResults.get(taskIndex);
} else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
} else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
actionResult = extractKeyPhrasesActionResults.get(taskIndex);
} else if (ENTITY_LINKING_TASKS.equals(taskName)) {
actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
} else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
actionResult = analyzeSentimentActionResults.get(taskIndex);
} else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
actionResult = extractSummaryActionResults.get(taskIndex);
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Invalid task name in target reference, " + taskName));
}
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
new com.azure.ai.textanalytics.models.TextAnalyticsError(
TextAnalyticsErrorCode.fromString(
error.getCode() == null ? null : error.getCode().toString()),
error.getMessage(), null));
}
}
final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizePiiEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
IterableStream.of(extractKeyPhrasesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeLinkedEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
IterableStream.of(analyzeSentimentActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
IterableStream.of(extractSummaryActionResults));
return analyzeActionsResult;
}
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
Response<AnalyzeJobState> analyzeJobStateResponse,
PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
switch (analyzeJobStateResponse.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case CANCELLED:
status = LongRunningOperationStatus.USER_CANCELLED;
break;
default:
status = LongRunningOperationStatus.fromString(
analyzeJobStateResponse.getValue().getStatus().toString(), true);
break;
}
}
AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getDisplayName());
AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getCreatedDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getExpirationDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getLastUpdateDateTime());
final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
tasksResult.getFailed());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
tasksResult.getInProgress());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
operationResultPollResponse.getValue(), tasksResult.getCompleted());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
tasksResult.getTotal());
return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
private String[] parseActionErrorTarget(String targetReference) {
if (CoreUtils.isNullOrEmpty(targetReference)) {
throw logger.logExceptionAsError(new RuntimeException(
"Expected an error with a target field referencing an action but did not get one"));
}
final Matcher matcher = PATTERN.matcher(targetReference);
String[] taskNameIdPair = new String[2];
while (matcher.find()) {
taskNameIdPair[0] = matcher.group(1);
taskNameIdPair[1] = matcher.group(2);
}
return taskNameIdPair;
}
} |
Does this mean the delay value can be negative? | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
String retryHeaderValue = responseHeaders.getValue(X_MS_RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
try {
return Duration.ofSeconds(Integer.parseInt(retryHeaderValue));
} catch (NumberFormatException ex) {
return Duration.between(OffsetDateTime.now(), new DateTimeRfc1123(retryHeaderValue).getDateTime());
}
}
return retryStrategy.calculateRetryDelay(tryCount);
} | if (delayMs != Integer.MIN_VALUE) { | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount == retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static int tryParseHeaderToInt(String headerValue) {
try {
return Integer.parseInt(headerValue);
} catch (NumberFormatException ex) {
return Integer.MIN_VALUE;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} |
Is there a particular order we should follow to parse these headers? Is it possible for more than one to be present in a response? | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
String retryHeaderValue = responseHeaders.getValue(X_MS_RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
try {
return Duration.ofSeconds(Integer.parseInt(retryHeaderValue));
} catch (NumberFormatException ex) {
return Duration.between(OffsetDateTime.now(), new DateTimeRfc1123(retryHeaderValue).getDateTime());
}
}
return retryStrategy.calculateRetryDelay(tryCount);
} | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount == retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static int tryParseHeaderToInt(String headerValue) {
try {
return Integer.parseInt(headerValue);
} catch (NumberFormatException ex) {
return Integer.MIN_VALUE;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} | |
I can update this to guard against negative retry delays | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
String retryHeaderValue = responseHeaders.getValue(X_MS_RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
try {
return Duration.ofSeconds(Integer.parseInt(retryHeaderValue));
} catch (NumberFormatException ex) {
return Duration.between(OffsetDateTime.now(), new DateTimeRfc1123(retryHeaderValue).getDateTime());
}
}
return retryStrategy.calculateRetryDelay(tryCount);
} | if (delayMs != Integer.MIN_VALUE) { | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount == retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static int tryParseHeaderToInt(String headerValue) {
try {
return Integer.parseInt(headerValue);
} catch (NumberFormatException ex) {
return Integer.MIN_VALUE;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} |
Uncertain if a particular order should be followed. Yes, it is possible for more than one of these headers to be returned but that would be a bad pattern | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
String retryHeaderValue = responseHeaders.getValue(X_MS_RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
try {
return Duration.ofSeconds(Integer.parseInt(retryHeaderValue));
} catch (NumberFormatException ex) {
return Duration.between(OffsetDateTime.now(), new DateTimeRfc1123(retryHeaderValue).getDateTime());
}
}
return retryStrategy.calculateRetryDelay(tryCount);
} | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount == retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static int tryParseHeaderToInt(String headerValue) {
try {
return Integer.parseInt(headerValue);
} catch (NumberFormatException ex) {
return Integer.MIN_VALUE;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} | |
Yeah. For consistency with other languages, the order is same as what Alan has. (X_MS_RETRY_AFTER_MS_HEADER | RETRY_AFTER_MS_HEADER) and then RETRY_AFTER_HEADER | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
String retryHeaderValue = responseHeaders.getValue(X_MS_RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
try {
return Duration.ofSeconds(Integer.parseInt(retryHeaderValue));
} catch (NumberFormatException ex) {
return Duration.between(OffsetDateTime.now(), new DateTimeRfc1123(retryHeaderValue).getDateTime());
}
}
return retryStrategy.calculateRetryDelay(tryCount);
} | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
// Kick off the first attempt; the try counter starts at 0 and grows on each retry.
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
/*
 * Performs one request attempt and, when the response or error is retriable, schedules the next
 * attempt after the computed backoff delay. Recursion depth is bounded by the strategy's max
 * retry count via shouldRetry/shouldRetryException.
 */
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
    final HttpRequest originalHttpRequest, final int tryCount) {
    // Send a fresh copy of the original request so mutations from a prior attempt don't leak in.
    context.setHttpRequest(originalHttpRequest.copy());
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
    return next.clone().process()
        .flatMap(httpResponse -> {
            if (shouldRetry(httpResponse, tryCount)) {
                final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
                    retryAfterHeader, retryAfterTimeUnit);
                logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
                    delayDuration.getSeconds());
                Flux<ByteBuffer> responseBody = httpResponse.getBody();
                if (responseBody == null) {
                    return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
                        .delaySubscription(delayDuration);
                } else {
                    // Drain the discarded body before retrying so the connection can be reused.
                    // Reuse the captured Flux instead of calling getBody() a second time.
                    return responseBody
                        .ignoreElements()
                        .then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
                            .delaySubscription(delayDuration));
                }
            } else {
                // '>=' is defensive: it still logs exhaustion if tryCount ever overshoots the max.
                if (tryCount >= retryStrategy.getMaxRetries()) {
                    logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
                }
                return Mono.just(httpResponse);
            }
        })
        .onErrorResume(err -> {
            if (shouldRetryException(err, tryCount)) {
                logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
                return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
                    .delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
            } else {
                logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
                return Mono.error(err);
            }
        });
}
// True while more retries remain AND the strategy deems this response retriable.
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
// True while more retries remain AND the strategy deems this throwable retriable.
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
 * Determines the delay duration that should be waited before retrying. When no explicit
 * retry-after header is configured, the well-known retry headers are consulted; when the
 * configured header is absent, empty, or malformed, the strategy's computed delay is used.
 */
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
    String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
    if (isNullOrEmpty(retryAfterHeader)) {
        return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
    }
    String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
    if (isNullOrEmpty(retryHeaderValue)) {
        return retryStrategy.calculateRetryDelay(tryCount);
    }
    try {
        return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
    } catch (NumberFormatException ex) {
        // A malformed header value must not crash the pipeline; fall back to the strategy delay.
        return retryStrategy.calculateRetryDelay(tryCount);
    }
}
/*
 * Attempts to parse the header value as an int, returning Integer.MIN_VALUE as a sentinel when
 * the value is not a valid integer. Callers compare against the sentinel before using the result.
 * NOTE(review): the previous comment here described delay determination via well-known retry
 * headers, which this helper does not do — presumably it belonged to a neighboring method.
 */
private static int tryParseHeaderToInt(String headerValue) {
try {
return Integer.parseInt(headerValue);
} catch (NumberFormatException ex) {
return Integer.MIN_VALUE;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
/*
 * Reads the named header and, when a value is present, converts it with the supplied parser.
 * Returns null when the header is absent or empty.
 */
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
    Function<String, Duration> delayParser) {
    String rawValue = headers.getValue(headerName);
    if (CoreUtils.isNullOrEmpty(rawValue)) {
        return null;
    }
    return delayParser.apply(rawValue);
}
// Interprets the value as a non-negative millisecond count; null signals "not usable".
private static Duration tryGetDelayMillis(String value) {
    long millis = tryParseLong(value);
    if (millis < 0) {
        return null;
    }
    return Duration.ofMillis(millis);
}
/*
 * Parses a 'Retry-After' style value: first as an RFC1123 date (delay = whole seconds until that
 * instant), otherwise as a raw number of seconds. Returns null when the resulting delay would be
 * negative (a date in the past, or an unparseable value mapped to -1), letting the caller fall
 * through to the next header or the strategy's own delay.
 */
private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
// Not an RFC1123 date; treat the value as whole seconds (-1 when unparseable).
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
// Parses the value as a long, mapping any malformed input to the negative sentinel -1.
private static long tryParseLong(String value) {
    long parsed = -1;
    try {
        parsed = Long.parseLong(value);
    } catch (NumberFormatException ignored) {
        // Fall through with the -1 sentinel.
    }
    return parsed;
}
} | |
feel this could be put separate in the helper method instead of the method `tryParseHeaderToInt` | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
String retryHeaderValue = responseHeaders.getValue(X_MS_RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_MS_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
int delayMs = tryParseHeaderToInt(retryHeaderValue);
if (delayMs != Integer.MIN_VALUE) {
return Duration.ofMillis(delayMs);
}
}
retryHeaderValue = responseHeaders.getValue(RETRY_AFTER_HEADER);
if (!isNullOrEmpty(retryHeaderValue)) {
try {
return Duration.ofSeconds(Integer.parseInt(retryHeaderValue));
} catch (NumberFormatException ex) {
return Duration.between(OffsetDateTime.now(), new DateTimeRfc1123(retryHeaderValue).getDateTime());
}
}
return retryStrategy.calculateRetryDelay(tryCount);
} | } | static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount == retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static int tryParseHeaderToInt(String headerValue) {
try {
return Integer.parseInt(headerValue);
} catch (NumberFormatException ex) {
return Integer.MIN_VALUE;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} |
A question: Should we check the Duration is not negative? | private static Duration tryParseLongOrDateTime(String value) {
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
return Duration.between(OffsetDateTime.now(), retryAfter);
} catch (DateTimeException ex) {
long delaySeconds = tryParseLong(value);
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
}
} | return Duration.between(OffsetDateTime.now(), retryAfter); | private static Duration tryParseLongOrDateTime(String value) {
long delaySeconds;
try {
OffsetDateTime retryAfter = new DateTimeRfc1123(value).getDateTime();
delaySeconds = OffsetDateTime.now().until(retryAfter, ChronoUnit.SECONDS);
} catch (DateTimeException ex) {
delaySeconds = tryParseLong(value);
}
return (delaySeconds >= 0) ? Duration.ofSeconds(delaySeconds) : null;
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
/*
 * Determines the retry delay from the well-known retry headers, checked in priority order:
 * 'x-ms-retry-after-ms', then 'retry-after-ms' (both milliseconds), then 'Retry-After'
 * (seconds or an RFC1123 date). Falls back to the strategy's computed delay when none apply.
 */
static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
    Duration delay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
        RetryPolicy::tryGetDelayMillis);
    if (delay == null) {
        delay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
    }
    if (delay == null) {
        delay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
    }
    return (delay != null) ? delay : retryStrategy.calculateRetryDelay(tryCount);
}
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final String RETRY_AFTER_HEADER = "Retry-After";
private static final String RETRY_AFTER_MS_HEADER = "retry-after-ms";
private static final String X_MS_RETRY_AFTER_MS_HEADER = "x-ms-retry-after-ms";
private final ClientLogger logger = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final String retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = retryAfterHeader;
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptAsync(context, next, context.getHttpRequest(), 0);
}
private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest, final int tryCount) {
context.setHttpRequest(originalHttpRequest.copy());
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
return next.clone().process()
.flatMap(httpResponse -> {
if (shouldRetry(httpResponse, tryCount)) {
final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
retryAfterHeader, retryAfterTimeUnit);
logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount,
delayDuration.getSeconds());
Flux<ByteBuffer> responseBody = httpResponse.getBody();
if (responseBody == null) {
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration);
} else {
return httpResponse.getBody()
.ignoreElements()
.then(attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(delayDuration));
}
} else {
if (tryCount >= retryStrategy.getMaxRetries()) {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount);
}
return Mono.just(httpResponse);
}
})
.onErrorResume(err -> {
if (shouldRetryException(err, tryCount)) {
logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err);
return attemptAsync(context, next, originalHttpRequest, tryCount + 1)
.delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
} else {
logger.info("Retry attempts have been exhausted after {} attempts.", tryCount, err);
return Mono.error(err);
}
});
}
private boolean shouldRetry(HttpResponse response, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response);
}
private boolean shouldRetryException(Throwable throwable, int tryCount) {
return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetryException(throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
if (isNullOrEmpty(retryAfterHeader)) {
return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy);
}
String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
if (isNullOrEmpty(retryHeaderValue)) {
return retryStrategy.calculateRetryDelay(tryCount);
}
return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
}
/*
* Determines the delay duration that should be waited before retrying using the well-known retry headers.
*/
static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy) {
Duration retryDelay = tryGetRetryDelay(responseHeaders, X_MS_RETRY_AFTER_MS_HEADER,
RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_MS_HEADER, RetryPolicy::tryGetDelayMillis);
if (retryDelay != null) {
return retryDelay;
}
retryDelay = tryGetRetryDelay(responseHeaders, RETRY_AFTER_HEADER, RetryPolicy::tryParseLongOrDateTime);
if (retryDelay != null) {
return retryDelay;
}
return retryStrategy.calculateRetryDelay(tryCount);
}
private static Duration tryGetRetryDelay(HttpHeaders headers, String headerName,
Function<String, Duration> delayParser) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : delayParser.apply(headerValue);
}
private static Duration tryGetDelayMillis(String value) {
long delayMillis = tryParseLong(value);
return (delayMillis >= 0) ? Duration.ofMillis(delayMillis) : null;
}
private static long tryParseLong(String value) {
try {
return Long.parseLong(value);
} catch (NumberFormatException ex) {
return -1;
}
}
} |
default maxRetry is 3. Does 3rd retry count as valid retry? | public void defaultRetryPolicyRetriesExpectedErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else if (count == 1) {
return Mono.just(new MockHttpResponse(request, 200));
} else {
return Mono.just(new MockHttpResponse(request, 400));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
} | return Mono.just(new MockHttpResponse(request, 400)); | public void defaultRetryPolicyRetriesExpectedErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else if (count == 1) {
return Mono.just(new MockHttpResponse(request, 200));
} else {
return Mono.just(new MockHttpResponse(request, 400));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
} | class RetryPolicyTests {
@ParameterizedTest
@ValueSource(ints = {408, 429, 500, 502, 503})
@ParameterizedTest
@ValueSource(ints = {400, 401, 402, 403, 404, 409, 412, 501, 505})
public void defaultRetryPolicyDoesntRetryOnErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(returnCode, response.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("defaultRetryPolicyRetriesAllExceptionsSupplier")
public void defaultRetryPolicyRetriesAllExceptions(Throwable throwable) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.error(throwable);
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Throwable> defaultRetryPolicyRetriesAllExceptionsSupplier() {
return Stream.of(
new Throwable(),
new MalformedURLException(),
new RuntimeException(),
new IllegalStateException(),
new TimeoutException()
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryStatusCodesSupplier")
public void customRetryPolicyCanDetermineRetryStatusCodes(RetryStrategy retryStrategy, int[] statusCodes,
int expectedStatusCode) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.just(new MockHttpResponse(request, statusCodes[attempt.getAndIncrement()])))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryStatusCodesSupplier() {
RetryStrategy onlyRetries429And503 = createStatusCodeRetryStrategy(429, 503);
RetryStrategy onlyRetries409And412 = createStatusCodeRetryStrategy(409, 412);
return Stream.of(
Arguments.of(onlyRetries429And503, new int[]{429, 503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 503}, 503),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 429}, 429),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 409}, 409),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 412}, 412)
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryExceptionsSupplier")
public void customRetryPolicyCanDetermineRetryExceptions(RetryStrategy retryStrategy, Throwable[] exceptions,
Class<? extends Throwable> expectedException) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.error(exceptions[attempt.getAndIncrement()]))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.verifyError(expectedException);
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryExceptionsSupplier() {
RetryStrategy onlyRetriesIOExceptions = createExceptionRetryStrategy(
Collections.singletonList(IOException.class));
RetryStrategy onlyRetriesTimeoutAndRuntimeExceptions = createExceptionRetryStrategy(
Arrays.asList(TimeoutException.class, RuntimeException.class));
return Stream.of(
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new RuntimeException()}, RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new RuntimeException()},
RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new TimeoutException()},
TimeoutException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new RuntimeException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new TimeoutException()}, TimeoutException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new RuntimeException()}, RuntimeException.class)
);
}
@Test
public void retryMax() {
final int maxRetries = 5;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
Assertions.assertTrue(count++ < maxRetries);
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(1))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void fixedDelayRetry() {
final int maxRetries = 5;
final long delayMillis = 500;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
Assertions.assertTrue(System.currentTimeMillis() >= previousAttemptMadeAt + delayMillis);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(delayMillis))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void exponentialDelayRetry() {
final int maxRetries = 5;
final long baseDelayMillis = 100;
final long maxDelayMillis = 1000;
ExponentialBackoff exponentialBackoff = new ExponentialBackoff(maxRetries, Duration.ofMillis(baseDelayMillis),
Duration.ofMillis(maxDelayMillis));
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
long requestMadeAt = System.currentTimeMillis();
long expectedToBeMadeAt =
previousAttemptMadeAt + ((1L << (count - 1)) * (long) (baseDelayMillis * 0.95));
Assertions.assertTrue(requestMadeAt >= expectedToBeMadeAt);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 503));
}
})
.policies(new RetryPolicy(exponentialBackoff))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(503, response.getStatusCode()))
.verifyComplete();
}
@Test
public void retryConsumesBody() {
final AtomicInteger bodyConsumptionCount = new AtomicInteger();
Flux<ByteBuffer> errorBody = Flux.generate(sink -> {
bodyConsumptionCount.incrementAndGet();
sink.next(ByteBuffer.wrap("Should be consumed" .getBytes(StandardCharsets.UTF_8)));
sink.complete();
});
final HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(new FixedDelay(2, Duration.ofMillis(1))))
.httpClient(request -> Mono.just(new HttpResponse(request) {
@Override
public int getStatusCode() {
return 503;
}
@Override
public String getHeaderValue(String name) {
return getHeaders().getValue(name);
}
@Override
public HttpHeaders getHeaders() {
return new HttpHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return errorBody;
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesInByteBufferStream(getBody());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsString(StandardCharsets.UTF_8);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
}))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "https:
.expectNextCount(1)
.verifyComplete();
assertEquals(2, bodyConsumptionCount.get());
}
@ParameterizedTest
@MethodSource("getWellKnownRetryDelaySupplier")
public void getWellKnownRetryDelay(HttpHeaders responseHeaders, RetryStrategy retryStrategy, Duration expected) {
assertEquals(expected, RetryPolicy.getWellKnownRetryDelay(responseHeaders, 1, retryStrategy));
}
private static Stream<Arguments> getWellKnownRetryDelaySupplier() {
RetryStrategy retryStrategy = mock(RetryStrategy.class);
when(retryStrategy.calculateRetryDelay(anyInt())).thenReturn(Duration.ofSeconds(1));
return Stream.of(
Arguments.of(new HttpHeaders(), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "10"), retryStrategy, Duration.ofMillis(10)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "64"), retryStrategy, Duration.ofMillis(64)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "10"), retryStrategy, Duration.ofSeconds(10)),
Arguments.of(new HttpHeaders().set("Retry-After", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", OffsetDateTime.now().minusMinutes(1)
.format(DateTimeFormatter.RFC_1123_DATE_TIME)), retryStrategy, Duration.ofSeconds(1))
);
}
@Test
public void retryAfterDateTime() {
HttpHeaders headers = new HttpHeaders().set("Retry-After",
new DateTimeRfc1123(OffsetDateTime.now().plusSeconds(30)).toString());
Duration actual = RetryPolicy.getWellKnownRetryDelay(headers, 1, null);
Duration skew = Duration.ofSeconds(30).minus(actual);
assertTrue(skew.getSeconds() < 2);
}
private static RetryStrategy createStatusCodeRetryStrategy(int... retriableErrorCodes) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetry(HttpResponse httpResponse) {
return Arrays.stream(retriableErrorCodes)
.anyMatch(retriableErrorCode -> httpResponse.getStatusCode() == retriableErrorCode);
}
};
}
private static RetryStrategy createExceptionRetryStrategy(List<Class<? extends Throwable>> retriableExceptions) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetryException(Throwable throwable) {
return retriableExceptions.stream()
.anyMatch(retriableException -> retriableException.isAssignableFrom(throwable.getClass()));
}
};
}
} | class RetryPolicyTests {
@ParameterizedTest
@ValueSource(ints = {408, 429, 500, 502, 503})
@ParameterizedTest
@ValueSource(ints = {400, 401, 402, 403, 404, 409, 412, 501, 505})
public void defaultRetryPolicyDoesntRetryOnErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(returnCode, response.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("defaultRetryPolicyRetriesAllExceptionsSupplier")
public void defaultRetryPolicyRetriesAllExceptions(Throwable throwable) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.error(throwable);
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Throwable> defaultRetryPolicyRetriesAllExceptionsSupplier() {
return Stream.of(
new Throwable(),
new MalformedURLException(),
new RuntimeException(),
new IllegalStateException(),
new TimeoutException()
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryStatusCodesSupplier")
public void customRetryPolicyCanDetermineRetryStatusCodes(RetryStrategy retryStrategy, int[] statusCodes,
int expectedStatusCode) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.just(new MockHttpResponse(request, statusCodes[attempt.getAndIncrement()])))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryStatusCodesSupplier() {
RetryStrategy onlyRetries429And503 = createStatusCodeRetryStrategy(429, 503);
RetryStrategy onlyRetries409And412 = createStatusCodeRetryStrategy(409, 412);
return Stream.of(
Arguments.of(onlyRetries429And503, new int[]{429, 503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 503}, 503),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 429}, 429),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 409}, 409),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 412}, 412)
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryExceptionsSupplier")
public void customRetryPolicyCanDetermineRetryExceptions(RetryStrategy retryStrategy, Throwable[] exceptions,
Class<? extends Throwable> expectedException) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.error(exceptions[attempt.getAndIncrement()]))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.verifyError(expectedException);
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryExceptionsSupplier() {
RetryStrategy onlyRetriesIOExceptions = createExceptionRetryStrategy(
Collections.singletonList(IOException.class));
RetryStrategy onlyRetriesTimeoutAndRuntimeExceptions = createExceptionRetryStrategy(
Arrays.asList(TimeoutException.class, RuntimeException.class));
return Stream.of(
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new RuntimeException()}, RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new RuntimeException()},
RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new TimeoutException()},
TimeoutException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new RuntimeException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new TimeoutException()}, TimeoutException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new RuntimeException()}, RuntimeException.class)
);
}
@Test
public void retryMax() {
final int maxRetries = 5;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
Assertions.assertTrue(count++ < maxRetries);
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(1))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void fixedDelayRetry() {
final int maxRetries = 5;
final long delayMillis = 500;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
Assertions.assertTrue(System.currentTimeMillis() >= previousAttemptMadeAt + delayMillis);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(delayMillis))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void exponentialDelayRetry() {
final int maxRetries = 5;
final long baseDelayMillis = 100;
final long maxDelayMillis = 1000;
ExponentialBackoff exponentialBackoff = new ExponentialBackoff(maxRetries, Duration.ofMillis(baseDelayMillis),
Duration.ofMillis(maxDelayMillis));
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
long requestMadeAt = System.currentTimeMillis();
long expectedToBeMadeAt =
previousAttemptMadeAt + ((1L << (count - 1)) * (long) (baseDelayMillis * 0.95));
Assertions.assertTrue(requestMadeAt >= expectedToBeMadeAt);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 503));
}
})
.policies(new RetryPolicy(exponentialBackoff))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(503, response.getStatusCode()))
.verifyComplete();
}
@Test
public void retryConsumesBody() {
final AtomicInteger bodyConsumptionCount = new AtomicInteger();
Flux<ByteBuffer> errorBody = Flux.generate(sink -> {
bodyConsumptionCount.incrementAndGet();
sink.next(ByteBuffer.wrap("Should be consumed" .getBytes(StandardCharsets.UTF_8)));
sink.complete();
});
final HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(new FixedDelay(2, Duration.ofMillis(1))))
.httpClient(request -> Mono.just(new HttpResponse(request) {
@Override
public int getStatusCode() {
return 503;
}
@Override
public String getHeaderValue(String name) {
return getHeaders().getValue(name);
}
@Override
public HttpHeaders getHeaders() {
return new HttpHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return errorBody;
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesInByteBufferStream(getBody());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsString(StandardCharsets.UTF_8);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
}))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "https:
.expectNextCount(1)
.verifyComplete();
assertEquals(2, bodyConsumptionCount.get());
}
@ParameterizedTest
@MethodSource("getWellKnownRetryDelaySupplier")
public void getWellKnownRetryDelay(HttpHeaders responseHeaders, RetryStrategy retryStrategy, Duration expected) {
assertEquals(expected, RetryPolicy.getWellKnownRetryDelay(responseHeaders, 1, retryStrategy));
}
private static Stream<Arguments> getWellKnownRetryDelaySupplier() {
RetryStrategy retryStrategy = mock(RetryStrategy.class);
when(retryStrategy.calculateRetryDelay(anyInt())).thenReturn(Duration.ofSeconds(1));
return Stream.of(
Arguments.of(new HttpHeaders(), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "10"), retryStrategy, Duration.ofMillis(10)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "64"), retryStrategy, Duration.ofMillis(64)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "10"), retryStrategy, Duration.ofSeconds(10)),
Arguments.of(new HttpHeaders().set("Retry-After", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", OffsetDateTime.now().minusMinutes(1)
.format(DateTimeFormatter.RFC_1123_DATE_TIME)), retryStrategy, Duration.ofSeconds(1))
);
}
@Test
public void retryAfterDateTime() {
HttpHeaders headers = new HttpHeaders().set("Retry-After",
new DateTimeRfc1123(OffsetDateTime.now().plusSeconds(30)).toString());
Duration actual = RetryPolicy.getWellKnownRetryDelay(headers, 1, null);
Duration skew = Duration.ofSeconds(30).minus(actual);
assertTrue(skew.getSeconds() < 2);
}
private static RetryStrategy createStatusCodeRetryStrategy(int... retriableErrorCodes) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetry(HttpResponse httpResponse) {
return Arrays.stream(retriableErrorCodes)
.anyMatch(retriableErrorCode -> httpResponse.getStatusCode() == retriableErrorCode);
}
};
}
private static RetryStrategy createExceptionRetryStrategy(List<Class<? extends Throwable>> retriableExceptions) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetryException(Throwable throwable) {
return retriableExceptions.stream()
.anyMatch(retriableException -> retriableException.isAssignableFrom(throwable.getClass()));
}
};
}
} |
The default retry policy shouldn't retry on 200, so it shouldn't hit this code path | public void defaultRetryPolicyRetriesExpectedErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else if (count == 1) {
return Mono.just(new MockHttpResponse(request, 200));
} else {
return Mono.just(new MockHttpResponse(request, 400));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
} | return Mono.just(new MockHttpResponse(request, 400)); | public void defaultRetryPolicyRetriesExpectedErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else if (count == 1) {
return Mono.just(new MockHttpResponse(request, 200));
} else {
return Mono.just(new MockHttpResponse(request, 400));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
} | class RetryPolicyTests {
@ParameterizedTest
@ValueSource(ints = {408, 429, 500, 502, 503})
@ParameterizedTest
@ValueSource(ints = {400, 401, 402, 403, 404, 409, 412, 501, 505})
public void defaultRetryPolicyDoesntRetryOnErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(returnCode, response.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("defaultRetryPolicyRetriesAllExceptionsSupplier")
public void defaultRetryPolicyRetriesAllExceptions(Throwable throwable) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.error(throwable);
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Throwable> defaultRetryPolicyRetriesAllExceptionsSupplier() {
return Stream.of(
new Throwable(),
new MalformedURLException(),
new RuntimeException(),
new IllegalStateException(),
new TimeoutException()
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryStatusCodesSupplier")
public void customRetryPolicyCanDetermineRetryStatusCodes(RetryStrategy retryStrategy, int[] statusCodes,
int expectedStatusCode) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.just(new MockHttpResponse(request, statusCodes[attempt.getAndIncrement()])))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryStatusCodesSupplier() {
RetryStrategy onlyRetries429And503 = createStatusCodeRetryStrategy(429, 503);
RetryStrategy onlyRetries409And412 = createStatusCodeRetryStrategy(409, 412);
return Stream.of(
Arguments.of(onlyRetries429And503, new int[]{429, 503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 503}, 503),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 429}, 429),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 409}, 409),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 412}, 412)
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryExceptionsSupplier")
public void customRetryPolicyCanDetermineRetryExceptions(RetryStrategy retryStrategy, Throwable[] exceptions,
Class<? extends Throwable> expectedException) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.error(exceptions[attempt.getAndIncrement()]))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.verifyError(expectedException);
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryExceptionsSupplier() {
RetryStrategy onlyRetriesIOExceptions = createExceptionRetryStrategy(
Collections.singletonList(IOException.class));
RetryStrategy onlyRetriesTimeoutAndRuntimeExceptions = createExceptionRetryStrategy(
Arrays.asList(TimeoutException.class, RuntimeException.class));
return Stream.of(
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new RuntimeException()}, RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new RuntimeException()},
RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new TimeoutException()},
TimeoutException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new RuntimeException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new TimeoutException()}, TimeoutException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new RuntimeException()}, RuntimeException.class)
);
}
@Test
public void retryMax() {
final int maxRetries = 5;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
Assertions.assertTrue(count++ < maxRetries);
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(1))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void fixedDelayRetry() {
final int maxRetries = 5;
final long delayMillis = 500;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
Assertions.assertTrue(System.currentTimeMillis() >= previousAttemptMadeAt + delayMillis);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(delayMillis))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void exponentialDelayRetry() {
final int maxRetries = 5;
final long baseDelayMillis = 100;
final long maxDelayMillis = 1000;
ExponentialBackoff exponentialBackoff = new ExponentialBackoff(maxRetries, Duration.ofMillis(baseDelayMillis),
Duration.ofMillis(maxDelayMillis));
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
long requestMadeAt = System.currentTimeMillis();
long expectedToBeMadeAt =
previousAttemptMadeAt + ((1L << (count - 1)) * (long) (baseDelayMillis * 0.95));
Assertions.assertTrue(requestMadeAt >= expectedToBeMadeAt);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 503));
}
})
.policies(new RetryPolicy(exponentialBackoff))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(503, response.getStatusCode()))
.verifyComplete();
}
@Test
public void retryConsumesBody() {
final AtomicInteger bodyConsumptionCount = new AtomicInteger();
Flux<ByteBuffer> errorBody = Flux.generate(sink -> {
bodyConsumptionCount.incrementAndGet();
sink.next(ByteBuffer.wrap("Should be consumed" .getBytes(StandardCharsets.UTF_8)));
sink.complete();
});
final HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(new FixedDelay(2, Duration.ofMillis(1))))
.httpClient(request -> Mono.just(new HttpResponse(request) {
@Override
public int getStatusCode() {
return 503;
}
@Override
public String getHeaderValue(String name) {
return getHeaders().getValue(name);
}
@Override
public HttpHeaders getHeaders() {
return new HttpHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return errorBody;
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesInByteBufferStream(getBody());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsString(StandardCharsets.UTF_8);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
}))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "https:
.expectNextCount(1)
.verifyComplete();
assertEquals(2, bodyConsumptionCount.get());
}
@ParameterizedTest
@MethodSource("getWellKnownRetryDelaySupplier")
public void getWellKnownRetryDelay(HttpHeaders responseHeaders, RetryStrategy retryStrategy, Duration expected) {
assertEquals(expected, RetryPolicy.getWellKnownRetryDelay(responseHeaders, 1, retryStrategy));
}
private static Stream<Arguments> getWellKnownRetryDelaySupplier() {
RetryStrategy retryStrategy = mock(RetryStrategy.class);
when(retryStrategy.calculateRetryDelay(anyInt())).thenReturn(Duration.ofSeconds(1));
return Stream.of(
Arguments.of(new HttpHeaders(), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "10"), retryStrategy, Duration.ofMillis(10)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "64"), retryStrategy, Duration.ofMillis(64)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "10"), retryStrategy, Duration.ofSeconds(10)),
Arguments.of(new HttpHeaders().set("Retry-After", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", OffsetDateTime.now().minusMinutes(1)
.format(DateTimeFormatter.RFC_1123_DATE_TIME)), retryStrategy, Duration.ofSeconds(1))
);
}
@Test
public void retryAfterDateTime() {
HttpHeaders headers = new HttpHeaders().set("Retry-After",
new DateTimeRfc1123(OffsetDateTime.now().plusSeconds(30)).toString());
Duration actual = RetryPolicy.getWellKnownRetryDelay(headers, 1, null);
Duration skew = Duration.ofSeconds(30).minus(actual);
assertTrue(skew.getSeconds() < 2);
}
private static RetryStrategy createStatusCodeRetryStrategy(int... retriableErrorCodes) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetry(HttpResponse httpResponse) {
return Arrays.stream(retriableErrorCodes)
.anyMatch(retriableErrorCode -> httpResponse.getStatusCode() == retriableErrorCode);
}
};
}
private static RetryStrategy createExceptionRetryStrategy(List<Class<? extends Throwable>> retriableExceptions) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetryException(Throwable throwable) {
return retriableExceptions.stream()
.anyMatch(retriableException -> retriableException.isAssignableFrom(throwable.getClass()));
}
};
}
} | class RetryPolicyTests {
@ParameterizedTest
@ValueSource(ints = {408, 429, 500, 502, 503})
@ParameterizedTest
@ValueSource(ints = {400, 401, 402, 403, 404, 409, 412, 501, 505})
public void defaultRetryPolicyDoesntRetryOnErrorCodes(int returnCode) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.just(new MockHttpResponse(request, returnCode));
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(returnCode, response.getStatusCode()))
.verifyComplete();
}
@ParameterizedTest
@MethodSource("defaultRetryPolicyRetriesAllExceptionsSupplier")
public void defaultRetryPolicyRetriesAllExceptions(Throwable throwable) {
AtomicInteger attemptCount = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy())
.httpClient(request -> {
int count = attemptCount.getAndIncrement();
if (count == 0) {
return Mono.error(throwable);
} else {
return Mono.just(new MockHttpResponse(request, 200));
}
})
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Throwable> defaultRetryPolicyRetriesAllExceptionsSupplier() {
return Stream.of(
new Throwable(),
new MalformedURLException(),
new RuntimeException(),
new IllegalStateException(),
new TimeoutException()
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryStatusCodesSupplier")
public void customRetryPolicyCanDetermineRetryStatusCodes(RetryStrategy retryStrategy, int[] statusCodes,
int expectedStatusCode) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.just(new MockHttpResponse(request, statusCodes[attempt.getAndIncrement()])))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.verifyComplete();
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryStatusCodesSupplier() {
RetryStrategy onlyRetries429And503 = createStatusCodeRetryStrategy(429, 503);
RetryStrategy onlyRetries409And412 = createStatusCodeRetryStrategy(409, 412);
return Stream.of(
Arguments.of(onlyRetries429And503, new int[]{429, 503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{503, 404}, 404),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 503}, 503),
Arguments.of(onlyRetries429And503, new int[]{429, 503, 429}, 429),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{412, 404}, 404),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 409}, 409),
Arguments.of(onlyRetries409And412, new int[]{409, 412, 412}, 412)
);
}
@ParameterizedTest
@MethodSource("customRetryPolicyCanDetermineRetryExceptionsSupplier")
public void customRetryPolicyCanDetermineRetryExceptions(RetryStrategy retryStrategy, Throwable[] exceptions,
Class<? extends Throwable> expectedException) {
AtomicInteger attempt = new AtomicInteger();
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(retryStrategy))
.httpClient(request -> Mono.error(exceptions[attempt.getAndIncrement()]))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.verifyError(expectedException);
}
private static Stream<Arguments> customRetryPolicyCanDetermineRetryExceptionsSupplier() {
RetryStrategy onlyRetriesIOExceptions = createExceptionRetryStrategy(
Collections.singletonList(IOException.class));
RetryStrategy onlyRetriesTimeoutAndRuntimeExceptions = createExceptionRetryStrategy(
Arrays.asList(TimeoutException.class, RuntimeException.class));
return Stream.of(
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new RuntimeException()}, RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new RuntimeException()},
RuntimeException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new TimeoutException()},
TimeoutException.class),
Arguments.of(onlyRetriesIOExceptions, new Throwable[]{new IOException(), new IOException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new RuntimeException(),
new IOException()}, IOException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new TimeoutException()}, TimeoutException.class),
Arguments.of(onlyRetriesTimeoutAndRuntimeExceptions, new Throwable[]{new TimeoutException(),
new RuntimeException(), new RuntimeException()}, RuntimeException.class)
);
}
@Test
public void retryMax() {
final int maxRetries = 5;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
Assertions.assertTrue(count++ < maxRetries);
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(1))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void fixedDelayRetry() {
final int maxRetries = 5;
final long delayMillis = 500;
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
Assertions.assertTrue(System.currentTimeMillis() >= previousAttemptMadeAt + delayMillis);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 500));
}
})
.policies(new RetryPolicy(new FixedDelay(maxRetries, Duration.ofMillis(delayMillis))))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(500, response.getStatusCode()))
.verifyComplete();
}
@Test
public void exponentialDelayRetry() {
final int maxRetries = 5;
final long baseDelayMillis = 100;
final long maxDelayMillis = 1000;
ExponentialBackoff exponentialBackoff = new ExponentialBackoff(maxRetries, Duration.ofMillis(baseDelayMillis),
Duration.ofMillis(maxDelayMillis));
final HttpPipeline pipeline = new HttpPipelineBuilder()
.httpClient(new NoOpHttpClient() {
int count = -1;
long previousAttemptMadeAt = -1;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (count > 0) {
long requestMadeAt = System.currentTimeMillis();
long expectedToBeMadeAt =
previousAttemptMadeAt + ((1L << (count - 1)) * (long) (baseDelayMillis * 0.95));
Assertions.assertTrue(requestMadeAt >= expectedToBeMadeAt);
}
Assertions.assertTrue(count++ < maxRetries);
previousAttemptMadeAt = System.currentTimeMillis();
return Mono.just(new MockHttpResponse(request, 503));
}
})
.policies(new RetryPolicy(exponentialBackoff))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "http:
.assertNext(response -> assertEquals(503, response.getStatusCode()))
.verifyComplete();
}
@Test
public void retryConsumesBody() {
final AtomicInteger bodyConsumptionCount = new AtomicInteger();
Flux<ByteBuffer> errorBody = Flux.generate(sink -> {
bodyConsumptionCount.incrementAndGet();
sink.next(ByteBuffer.wrap("Should be consumed" .getBytes(StandardCharsets.UTF_8)));
sink.complete();
});
final HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(new RetryPolicy(new FixedDelay(2, Duration.ofMillis(1))))
.httpClient(request -> Mono.just(new HttpResponse(request) {
@Override
public int getStatusCode() {
return 503;
}
@Override
public String getHeaderValue(String name) {
return getHeaders().getValue(name);
}
@Override
public HttpHeaders getHeaders() {
return new HttpHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return errorBody;
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesInByteBufferStream(getBody());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsString(StandardCharsets.UTF_8);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
}))
.build();
StepVerifier.create(pipeline.send(new HttpRequest(HttpMethod.GET, "https:
.expectNextCount(1)
.verifyComplete();
assertEquals(2, bodyConsumptionCount.get());
}
@ParameterizedTest
@MethodSource("getWellKnownRetryDelaySupplier")
public void getWellKnownRetryDelay(HttpHeaders responseHeaders, RetryStrategy retryStrategy, Duration expected) {
assertEquals(expected, RetryPolicy.getWellKnownRetryDelay(responseHeaders, 1, retryStrategy));
}
private static Stream<Arguments> getWellKnownRetryDelaySupplier() {
RetryStrategy retryStrategy = mock(RetryStrategy.class);
when(retryStrategy.calculateRetryDelay(anyInt())).thenReturn(Duration.ofSeconds(1));
return Stream.of(
Arguments.of(new HttpHeaders(), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "10"), retryStrategy, Duration.ofMillis(10)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("x-ms-retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "64"), retryStrategy, Duration.ofMillis(64)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("retry-after-ms", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "10"), retryStrategy, Duration.ofSeconds(10)),
Arguments.of(new HttpHeaders().set("Retry-After", "-10"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", "ten"), retryStrategy, Duration.ofSeconds(1)),
Arguments.of(new HttpHeaders().set("Retry-After", OffsetDateTime.now().minusMinutes(1)
.format(DateTimeFormatter.RFC_1123_DATE_TIME)), retryStrategy, Duration.ofSeconds(1))
);
}
@Test
public void retryAfterDateTime() {
HttpHeaders headers = new HttpHeaders().set("Retry-After",
new DateTimeRfc1123(OffsetDateTime.now().plusSeconds(30)).toString());
Duration actual = RetryPolicy.getWellKnownRetryDelay(headers, 1, null);
Duration skew = Duration.ofSeconds(30).minus(actual);
assertTrue(skew.getSeconds() < 2);
}
private static RetryStrategy createStatusCodeRetryStrategy(int... retriableErrorCodes) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetry(HttpResponse httpResponse) {
return Arrays.stream(retriableErrorCodes)
.anyMatch(retriableErrorCode -> httpResponse.getStatusCode() == retriableErrorCode);
}
};
}
private static RetryStrategy createExceptionRetryStrategy(List<Class<? extends Throwable>> retriableExceptions) {
return new RetryStrategy() {
@Override
public int getMaxRetries() {
return 2;
}
@Override
public Duration calculateRetryDelay(int retryAttempts) {
return Duration.ofMillis(1);
}
@Override
public boolean shouldRetryException(Throwable throwable) {
return retriableExceptions.stream()
.anyMatch(retriableException -> retriableException.isAssignableFrom(throwable.getClass()));
}
};
}
} |
Instead of null pointer, we should throw some other exception , like illegalargumentexception | static IndexUtilizationInfo createFromJSONString(String jsonString) {
if (StringUtils.isEmpty(jsonString)) {
throw new NullPointerException("jsonString");
}
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
e.printStackTrace();
}
return indexUtilizationInfo;
} | throw new NullPointerException("jsonString"); | static IndexUtilizationInfo createFromJSONString(String jsonString) {
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
logger.error("Json not correctly formed ", e);
}
return indexUtilizationInfo;
} | class IndexUtilizationInfo {
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {
super();
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
throw new NullPointerException("indexUtilizationInfoCollection");
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo == null) {
throw new NullPointerException("queryPreparationTimesList can not have a null element");
}
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} | class IndexUtilizationInfo {
private final static Logger logger = LoggerFactory.getLogger(IndexUtilizationInfo.class.getSimpleName());
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
return null;
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo != null) {
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} |
Please use logger here | static IndexUtilizationInfo createFromJSONString(String jsonString) {
if (StringUtils.isEmpty(jsonString)) {
throw new NullPointerException("jsonString");
}
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
e.printStackTrace();
}
return indexUtilizationInfo;
} | e.printStackTrace(); | static IndexUtilizationInfo createFromJSONString(String jsonString) {
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
logger.error("Json not correctly formed ", e);
}
return indexUtilizationInfo;
} | class IndexUtilizationInfo {
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {
super();
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
throw new NullPointerException("indexUtilizationInfoCollection");
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo == null) {
throw new NullPointerException("queryPreparationTimesList can not have a null element");
}
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} | class IndexUtilizationInfo {
private final static Logger logger = LoggerFactory.getLogger(IndexUtilizationInfo.class.getSimpleName());
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
return null;
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo != null) {
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} |
Removed this check, as I am doing this check already in QueryMetrics.java (lines 290 - 292). | static IndexUtilizationInfo createFromJSONString(String jsonString) {
if (StringUtils.isEmpty(jsonString)) {
throw new NullPointerException("jsonString");
}
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
e.printStackTrace();
}
return indexUtilizationInfo;
} | throw new NullPointerException("jsonString"); | static IndexUtilizationInfo createFromJSONString(String jsonString) {
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
logger.error("Json not correctly formed ", e);
}
return indexUtilizationInfo;
} | class IndexUtilizationInfo {
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {
super();
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
throw new NullPointerException("indexUtilizationInfoCollection");
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo == null) {
throw new NullPointerException("queryPreparationTimesList can not have a null element");
}
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} | class IndexUtilizationInfo {
private final static Logger logger = LoggerFactory.getLogger(IndexUtilizationInfo.class.getSimpleName());
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
return null;
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo != null) {
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} |
Added | static IndexUtilizationInfo createFromJSONString(String jsonString) {
if (StringUtils.isEmpty(jsonString)) {
throw new NullPointerException("jsonString");
}
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
e.printStackTrace();
}
return indexUtilizationInfo;
} | e.printStackTrace(); | static IndexUtilizationInfo createFromJSONString(String jsonString) {
ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
IndexUtilizationInfo indexUtilizationInfo = null;
try {
indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
} catch (JsonProcessingException e) {
logger.error("Json not correctly formed ", e);
}
return indexUtilizationInfo;
} | class IndexUtilizationInfo {
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
IndexUtilizationInfo() {
super();
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
* @param potentialSingleIndexes -> The potential single indexes list.
* @param utilizedCompositeIndexes -> The potential composite indexes list.
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
* @return utilizedSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
* @return potentialSingleIndexes
*/
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
* @return utilizedCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
* @return potentialCompositeIndexes
*/
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
* @param utilizedSingleIndexes -> The utilized single indexes list.
*/
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
* @param potentialSingleIndexes -> The potential single indexes list.
*/
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
* @param utilizedCompositeIndexes -> The potential composite indexes list.
*/
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
* @param potentialCompositeIndexes -> The utilized composite indexes list.
*/
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
static IndexUtilizationInfo createFromCollection(
Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
if (indexUtilizationInfoCollection == null) {
throw new NullPointerException("indexUtilizationInfoCollection");
}
List<SingleIndexUtilizationEntity> utilizedSingleIndexes = new ArrayList<>();
List<SingleIndexUtilizationEntity> potentialSingleIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes = new ArrayList<>();
List<CompositeIndexUtilizationEntity> potentialCompositeIndexes = new ArrayList<>();
for (IndexUtilizationInfo indexUtilizationInfo : indexUtilizationInfoCollection) {
if (indexUtilizationInfo == null) {
throw new NullPointerException("queryPreparationTimesList can not have a null element");
}
utilizedSingleIndexes.addAll(indexUtilizationInfo.utilizedSingleIndexes);
potentialSingleIndexes.addAll(indexUtilizationInfo.potentialSingleIndexes);
utilizedCompositeIndexes.addAll(indexUtilizationInfo.utilizedCompositeIndexes);
potentialCompositeIndexes.addAll(indexUtilizationInfo.potentialCompositeIndexes);
}
return new IndexUtilizationInfo(
utilizedSingleIndexes,
potentialSingleIndexes,
utilizedCompositeIndexes,
potentialCompositeIndexes);
}
} | class IndexUtilizationInfo {
// Logger keyed on the simple class name, matching the file's other diagnostics types.
private final static Logger logger = LoggerFactory.getLogger(IndexUtilizationInfo.class.getSimpleName());
// Shared empty aggregate used when no index-utilization data was reported.
static final IndexUtilizationInfo ZERO = new IndexUtilizationInfo(
new ArrayList<>(), /* utilizedSingleIndexes */
new ArrayList<>(), /* potentialSingleIndexes */
new ArrayList<>(), /* utilizedCompositeIndexes */
new ArrayList<>()); /* potentialCompositeIndexes */
// Single indexes the query actually used; populated only during deserialization (WRITE_ONLY).
@JsonProperty(value = "UtilizedSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> utilizedSingleIndexes;
// Single indexes the service suggests could serve the query.
@JsonProperty(value = "PotentialSingleIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<SingleIndexUtilizationEntity> potentialSingleIndexes;
// Composite indexes the query actually used.
@JsonProperty(value = "UtilizedCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes;
// Composite indexes the service suggests could serve the query.
@JsonProperty(value = "PotentialCompositeIndexes", access = JsonProperty.Access.WRITE_ONLY)
private List<CompositeIndexUtilizationEntity> potentialCompositeIndexes;
// No-args constructor required by Jackson for deserialization.
IndexUtilizationInfo() {}
/**
 * @param utilizedSingleIndexes -> The utilized single indexes list.
 * @param potentialSingleIndexes -> The potential single indexes list.
 * @param utilizedCompositeIndexes -> The utilized composite indexes list.
 * @param potentialCompositeIndexes -> The potential composite indexes list.
 */
IndexUtilizationInfo(List<SingleIndexUtilizationEntity> utilizedSingleIndexes, List<SingleIndexUtilizationEntity> potentialSingleIndexes, List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes, List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
this.potentialSingleIndexes = potentialSingleIndexes;
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
 * @return utilizedSingleIndexes
 */
public List<SingleIndexUtilizationEntity> getUtilizedSingleIndexes() {
return utilizedSingleIndexes;
}
/**
 * @return potentialSingleIndexes
 */
public List<SingleIndexUtilizationEntity> getPotentialSingleIndexes() {
return potentialSingleIndexes;
}
/**
 * @return utilizedCompositeIndexes
 */
public List<CompositeIndexUtilizationEntity> getUtilizedCompositeIndexes() {
return utilizedCompositeIndexes;
}
/**
 * @return potentialCompositeIndexes
 */
public List<CompositeIndexUtilizationEntity> getPotentialCompositeIndexes() {
return potentialCompositeIndexes;
}
/**
 * @param utilizedSingleIndexes -> The utilized single indexes list.
 */
public void setUtilizedSingleIndexes(List<SingleIndexUtilizationEntity> utilizedSingleIndexes) {
this.utilizedSingleIndexes = utilizedSingleIndexes;
}
/**
 * @param potentialSingleIndexes -> The potential single indexes list.
 */
public void setPotentialSingleIndexes(List<SingleIndexUtilizationEntity> potentialSingleIndexes) {
this.potentialSingleIndexes = potentialSingleIndexes;
}
/**
 * @param utilizedCompositeIndexes -> The utilized composite indexes list.
 */
public void setUtilizedCompositeIndexes(List<CompositeIndexUtilizationEntity> utilizedCompositeIndexes) {
this.utilizedCompositeIndexes = utilizedCompositeIndexes;
}
/**
 * @param potentialCompositeIndexes -> The potential composite indexes list.
 */
public void setPotentialCompositeIndexes(List<CompositeIndexUtilizationEntity> potentialCompositeIndexes) {
this.potentialCompositeIndexes = potentialCompositeIndexes;
}
/**
 * Flattens a collection of {@link IndexUtilizationInfo} instances into a single
 * aggregate instance, concatenating the four per-category index-entity lists.
 * Null elements in the collection are tolerated and skipped.
 *
 * @param indexUtilizationInfoCollection source infos; may contain null entries.
 * @return the merged info, or {@code null} when the collection itself is null.
 */
static IndexUtilizationInfo createFromCollection(
    Collection<IndexUtilizationInfo> indexUtilizationInfoCollection) {
    if (indexUtilizationInfoCollection == null) {
        return null;
    }
    // One accumulator per index-entity category.
    List<SingleIndexUtilizationEntity> mergedUtilizedSingle = new ArrayList<>();
    List<SingleIndexUtilizationEntity> mergedPotentialSingle = new ArrayList<>();
    List<CompositeIndexUtilizationEntity> mergedUtilizedComposite = new ArrayList<>();
    List<CompositeIndexUtilizationEntity> mergedPotentialComposite = new ArrayList<>();
    for (IndexUtilizationInfo info : indexUtilizationInfoCollection) {
        if (info == null) {
            continue; // null entries carry no data; skip them silently
        }
        mergedUtilizedSingle.addAll(info.utilizedSingleIndexes);
        mergedPotentialSingle.addAll(info.potentialSingleIndexes);
        mergedUtilizedComposite.addAll(info.utilizedCompositeIndexes);
        mergedPotentialComposite.addAll(info.potentialCompositeIndexes);
    }
    return new IndexUtilizationInfo(
        mergedUtilizedSingle,
        mergedPotentialSingle,
        mergedUtilizedComposite,
        mergedPotentialComposite);
}
} |
Map | private Object createFeature(FeatureFlagConfigurationSetting item) throws IOException {
String key = getFeatureSimpleName(item);
Feature feature = new Feature(key, item);
HashMap<Integer, FeatureFlagFilter> featureEnabledFor = feature.getEnabledFor();
if (featureEnabledFor.size() == 0 && item.isEnabled()) {
return true;
} else if (!item.isEnabled()) {
return false;
}
for (int filter = 0; filter < feature.getEnabledFor().size(); filter++) {
FeatureFlagFilter featureFilterEvaluationContext = featureEnabledFor.get(filter);
Map<String, Object> parameters = featureFilterEvaluationContext.getParameters();
if (parameters == null || !featureEnabledFor.get(filter).getName().equals(TARGETING_FILTER)) {
continue;
}
Object audienceObject = parameters.get(AUDIENCE);
if (audienceObject != null) {
parameters = (Map<String, Object>) audienceObject;
}
List<Object> users = convertToListOrEmptyList(parameters, USERS_CAPS);
List<Object> groupRollouts = convertToListOrEmptyList(parameters, GROUPS_CAPS);
switchKeyValues(parameters, USERS_CAPS, USERS, mapValuesByIndex(users));
switchKeyValues(parameters, GROUPS_CAPS, GROUPS, mapValuesByIndex(groupRollouts));
switchKeyValues(parameters, DEFAULT_ROLLOUT_PERCENTAGE_CAPS, DEFAULT_ROLLOUT_PERCENTAGE,
parameters.get(DEFAULT_ROLLOUT_PERCENTAGE_CAPS));
featureFilterEvaluationContext.setParameters(parameters);
featureEnabledFor.put(filter, featureFilterEvaluationContext);
feature.setEnabledFor(featureEnabledFor);
}
return feature;
} | HashMap<Integer, FeatureFlagFilter> featureEnabledFor = feature.getEnabledFor(); | private Object createFeature(FeatureFlagConfigurationSetting item) throws IOException {
// Key under which this flag is registered (setting key with the feature-flag prefix stripped).
String key = getFeatureSimpleName(item);
Feature feature = new Feature(key, item);
Map<Integer, FeatureFlagFilter> featureEnabledFor = feature.getEnabledFor();
// A flag with no filters collapses to a plain boolean: its enabled state.
if (featureEnabledFor.size() == 0 && item.isEnabled()) {
return true;
} else if (!item.isEnabled()) {
return false;
}
for (int filter = 0; filter < feature.getEnabledFor().size(); filter++) {
FeatureFlagFilter featureFilterEvaluationContext = featureEnabledFor.get(filter);
Map<String, Object> parameters = featureFilterEvaluationContext.getParameters();
// Constant-first equals: safe even when the filter's name is null.
if (parameters == null || !TARGETING_FILTER.equals(featureEnabledFor.get(filter).getName())) {
continue;
}
Object audienceObject = parameters.get(AUDIENCE);
if (audienceObject != null) {
// Assumes the Audience payload deserializes as a Map — TODO confirm against service schema.
parameters = (Map<String, Object>) audienceObject;
}
List<Object> users = convertToListOrEmptyList(parameters, USERS_CAPS);
List<Object> groupRollouts = convertToListOrEmptyList(parameters, GROUPS_CAPS);
// Rename the service's PascalCase keys to the camelCase keys expected downstream.
switchKeyValues(parameters, USERS_CAPS, USERS, mapValuesByIndex(users));
switchKeyValues(parameters, GROUPS_CAPS, GROUPS, mapValuesByIndex(groupRollouts));
switchKeyValues(parameters, DEFAULT_ROLLOUT_PERCENTAGE_CAPS, DEFAULT_ROLLOUT_PERCENTAGE,
parameters.get(DEFAULT_ROLLOUT_PERCENTAGE_CAPS));
featureFilterEvaluationContext.setParameters(parameters);
featureEnabledFor.put(filter, featureFilterEvaluationContext);
feature.setEnabledFor(featureEnabledFor);
}
return feature;
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private static final String USERS = "users";
private static final String USERS_CAPS = "Users";
private static final String AUDIENCE = "Audience";
private static final String GROUPS = "groups";
private static final String GROUPS_CAPS = "Groups";
private static final String TARGETING_FILTER = "targetingFilter";
private static final String DEFAULT_ROLLOUT_PERCENTAGE = "defaultRolloutPercentage";
private static final String DEFAULT_ROLLOUT_PERCENTAGE_CAPS = "DefaultRolloutPercentage";
private static final ObjectMapper CASE_INSENSITIVE_MAPPER = new ObjectMapper()
.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true);
private final String context;
private final String label;
private final Map<String, Object> properties = new LinkedHashMap<>();
private final AppConfigurationProperties appConfigurationProperties;
private final HashMap<String, KeyVaultClient> keyVaultClients;
private final ClientStore clients;
private final KeyVaultCredentialProvider keyVaultCredentialProvider;
private final SecretClientBuilderSetup keyVaultClientProvider;
private final AppConfigurationProviderProperties appProperties;
private final ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
private static List<Object> convertToListOrEmptyList(Map<String, Object> parameters, String key) {
List<Object> listObjects = CASE_INSENSITIVE_MAPPER.convertValue(
parameters.get(key),
new TypeReference<List<Object>>() {
});
return listObjects == null ? emptyList() : listObjects;
}
/**
 * Returns the names of all properties currently held by this property source.
 * Uses the zero-length-array {@code toArray} idiom, which the JDK documents as
 * at least as fast as presizing and avoids the intermediate local.
 */
@Override
public String[] getPropertyNames() {
    return properties.keySet().toArray(new String[0]);
}
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call {@code initFeatures} to update
* Feature Management, but make sure its done in the last {@code
* AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @return Updated Feature Set from Property Source
* @throws IOException Thrown when processing key/value failed when reading feature flags
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
List<ConfigurationSetting> features = new ArrayList<>();
if (configStore.getFeatureFlags().getEnabled()) {
settingSelector.setKeyFilter(configStore.getFeatureFlags().getKeyFilter())
.setLabelFilter(configStore.getFeatureFlags().getLabelFilter());
features = clients.listSettings(settingSelector, storeName);
if (features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
}
if (settings == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting instanceof SecretReferenceConfigurationSetting) {
String entry = getKeyVaultEntry((SecretReferenceConfigurationSetting) setting);
if (entry != null) {
properties.put(key, entry);
}
} else if (StringUtils.hasText(setting.getContentType())
&& JsonConfigurationParser.isJsonContentType(setting.getContentType())) {
HashMap<String, Object> jsonSettings = JsonConfigurationParser.parseJsonSetting(setting);
for (Entry<String, Object> jsonSetting : jsonSettings.entrySet()) {
key = jsonSetting.getKey().trim().substring(context.length());
properties.put(key, jsonSetting.getValue());
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
 * Given a Setting's Key Vault Reference stored in the Settings value, it will get its entry in Key Vault.
 *
 * @param secretReference setting whose secret id looks like {"uri": "<your-vault-url>/secret/<secret>/<version>"}
 * @return Key Vault Secret Value
 */
private String getKeyVaultEntry(SecretReferenceConfigurationSetting secretReference) {
String secretValue = null;
try {
URI uri = null;
try {
uri = new URI(secretReference.getSecretId());
} catch (URISyntaxException e) {
// A malformed secret id is a configuration error; surface it as unchecked.
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
// Lazily create and cache one Key Vault client per vault host.
if (!keyVaultClients.containsKey(uri.getHost())) {
KeyVaultClient client = new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider,
keyVaultClientProvider);
keyVaultClients.put(uri.getHost(), client);
}
KeyVaultSecret secret = keyVaultClients.get(uri.getHost()).getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
// Log, then rethrow as unchecked so property-source loading fails loudly.
LOGGER.error("Error Retreiving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
/**
* Initializes Feature Management configurations. Only one {@code AppConfigurationPropertySource} can call this, and
* it needs to be done after the rest have run initProperties.
*
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
if (setting instanceof FeatureFlagConfigurationSetting) {
Object feature = createFeature((FeatureFlagConfigurationSetting) setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into properties.
* @return Feature created from KeyValueItem
* @throws IOException - If a ConfigurationSetting isn't of the feature flag content type.
*/
@SuppressWarnings("unchecked")
private String getFeatureSimpleName(ConfigurationSetting setting) {
return setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
}
private Map<String, Object> mapValuesByIndex(List<Object> users) {
return IntStream.range(0, users.size()).boxed().collect(toMap(String::valueOf, users::get));
}
/** Replaces {@code oldKey} with {@code newKey} in {@code parameters}, storing {@code value} under the new key. */
private void switchKeyValues(Map<String, Object> parameters, String oldKey, String newKey, Object value) {
parameters.put(newKey, value);
// put-then-remove: order matters only if oldKey equaled newKey; all callers pass distinct keys.
parameters.remove(oldKey);
}
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private static final String USERS = "users";
private static final String USERS_CAPS = "Users";
private static final String AUDIENCE = "Audience";
private static final String GROUPS = "groups";
private static final String GROUPS_CAPS = "Groups";
private static final String TARGETING_FILTER = "targetingFilter";
private static final String DEFAULT_ROLLOUT_PERCENTAGE = "defaultRolloutPercentage";
private static final String DEFAULT_ROLLOUT_PERCENTAGE_CAPS = "DefaultRolloutPercentage";
private static final ObjectMapper CASE_INSENSITIVE_MAPPER = new ObjectMapper()
.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true);
private final String context;
private final String label;
private final Map<String, Object> properties = new LinkedHashMap<>();
private final AppConfigurationProperties appConfigurationProperties;
private final HashMap<String, KeyVaultClient> keyVaultClients;
private final ClientStore clients;
private final KeyVaultCredentialProvider keyVaultCredentialProvider;
private final SecretClientBuilderSetup keyVaultClientProvider;
private final AppConfigurationProviderProperties appProperties;
private final ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
private static List<Object> convertToListOrEmptyList(Map<String, Object> parameters, String key) {
List<Object> listObjects = CASE_INSENSITIVE_MAPPER.convertValue(
parameters.get(key),
new TypeReference<List<Object>>() {
});
return listObjects == null ? emptyList() : listObjects;
}
@Override
public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[keySet.size()]);
}
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call {@code initFeatures} to update
* Feature Management, but make sure its done in the last {@code
* AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @return Updated Feature Set from Property Source
* @throws IOException Thrown when processing key/value failed when reading feature flags
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
List<ConfigurationSetting> features = new ArrayList<>();
if (configStore.getFeatureFlags().getEnabled()) {
settingSelector.setKeyFilter(configStore.getFeatureFlags().getKeyFilter())
.setLabelFilter(configStore.getFeatureFlags().getLabelFilter());
features = clients.listSettings(settingSelector, storeName);
if (features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
}
if (settings == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting instanceof SecretReferenceConfigurationSetting) {
String entry = getKeyVaultEntry((SecretReferenceConfigurationSetting) setting);
if (entry != null) {
properties.put(key, entry);
}
} else if (StringUtils.hasText(setting.getContentType())
&& JsonConfigurationParser.isJsonContentType(setting.getContentType())) {
HashMap<String, Object> jsonSettings = JsonConfigurationParser.parseJsonSetting(setting);
for (Entry<String, Object> jsonSetting : jsonSettings.entrySet()) {
key = jsonSetting.getKey().trim().substring(context.length());
properties.put(key, jsonSetting.getValue());
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its entry in Key Vault.
*
* @param value {"uri": "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(SecretReferenceConfigurationSetting secretReference) {
String secretValue = null;
try {
URI uri = null;
try {
uri = new URI(secretReference.getSecretId());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
if (!keyVaultClients.containsKey(uri.getHost())) {
KeyVaultClient client = new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider,
keyVaultClientProvider);
keyVaultClients.put(uri.getHost(), client);
}
KeyVaultSecret secret = keyVaultClients.get(uri.getHost()).getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retreiving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
/**
* Initializes Feature Management configurations. Only one {@code AppConfigurationPropertySource} can call this, and
* it needs to be done after the rest have run initProperties.
*
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
if (setting instanceof FeatureFlagConfigurationSetting) {
Object feature = createFeature((FeatureFlagConfigurationSetting) setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into properties.
* @return Feature created from KeyValueItem
* @throws IOException - If a ConfigurationSetting isn't of the feature flag content type.
*/
@SuppressWarnings("unchecked")
private String getFeatureSimpleName(ConfigurationSetting setting) {
return setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
}
private Map<String, Object> mapValuesByIndex(List<Object> users) {
return IntStream.range(0, users.size()).boxed().collect(toMap(String::valueOf, users::get));
}
private void switchKeyValues(Map<String, Object> parameters, String oldKey, String newKey, Object value) {
parameters.put(newKey, value);
parameters.remove(oldKey);
}
} |
How about TARGETING_FILTER.equals to avoid NPE? | private Object createFeature(FeatureFlagConfigurationSetting item) throws IOException {
String key = getFeatureSimpleName(item);
Feature feature = new Feature(key, item);
HashMap<Integer, FeatureFlagFilter> featureEnabledFor = feature.getEnabledFor();
if (featureEnabledFor.size() == 0 && item.isEnabled()) {
return true;
} else if (!item.isEnabled()) {
return false;
}
for (int filter = 0; filter < feature.getEnabledFor().size(); filter++) {
FeatureFlagFilter featureFilterEvaluationContext = featureEnabledFor.get(filter);
Map<String, Object> parameters = featureFilterEvaluationContext.getParameters();
if (parameters == null || !featureEnabledFor.get(filter).getName().equals(TARGETING_FILTER)) {
continue;
}
Object audienceObject = parameters.get(AUDIENCE);
if (audienceObject != null) {
parameters = (Map<String, Object>) audienceObject;
}
List<Object> users = convertToListOrEmptyList(parameters, USERS_CAPS);
List<Object> groupRollouts = convertToListOrEmptyList(parameters, GROUPS_CAPS);
switchKeyValues(parameters, USERS_CAPS, USERS, mapValuesByIndex(users));
switchKeyValues(parameters, GROUPS_CAPS, GROUPS, mapValuesByIndex(groupRollouts));
switchKeyValues(parameters, DEFAULT_ROLLOUT_PERCENTAGE_CAPS, DEFAULT_ROLLOUT_PERCENTAGE,
parameters.get(DEFAULT_ROLLOUT_PERCENTAGE_CAPS));
featureFilterEvaluationContext.setParameters(parameters);
featureEnabledFor.put(filter, featureFilterEvaluationContext);
feature.setEnabledFor(featureEnabledFor);
}
return feature;
} | if (parameters == null || !featureEnabledFor.get(filter).getName().equals(TARGETING_FILTER)) { | private Object createFeature(FeatureFlagConfigurationSetting item) throws IOException {
String key = getFeatureSimpleName(item);
Feature feature = new Feature(key, item);
Map<Integer, FeatureFlagFilter> featureEnabledFor = feature.getEnabledFor();
if (featureEnabledFor.size() == 0 && item.isEnabled()) {
return true;
} else if (!item.isEnabled()) {
return false;
}
for (int filter = 0; filter < feature.getEnabledFor().size(); filter++) {
FeatureFlagFilter featureFilterEvaluationContext = featureEnabledFor.get(filter);
Map<String, Object> parameters = featureFilterEvaluationContext.getParameters();
if (parameters == null || !TARGETING_FILTER.equals(featureEnabledFor.get(filter).getName())) {
continue;
}
Object audienceObject = parameters.get(AUDIENCE);
if (audienceObject != null) {
parameters = (Map<String, Object>) audienceObject;
}
List<Object> users = convertToListOrEmptyList(parameters, USERS_CAPS);
List<Object> groupRollouts = convertToListOrEmptyList(parameters, GROUPS_CAPS);
switchKeyValues(parameters, USERS_CAPS, USERS, mapValuesByIndex(users));
switchKeyValues(parameters, GROUPS_CAPS, GROUPS, mapValuesByIndex(groupRollouts));
switchKeyValues(parameters, DEFAULT_ROLLOUT_PERCENTAGE_CAPS, DEFAULT_ROLLOUT_PERCENTAGE,
parameters.get(DEFAULT_ROLLOUT_PERCENTAGE_CAPS));
featureFilterEvaluationContext.setParameters(parameters);
featureEnabledFor.put(filter, featureFilterEvaluationContext);
feature.setEnabledFor(featureEnabledFor);
}
return feature;
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private static final String USERS = "users";
private static final String USERS_CAPS = "Users";
private static final String AUDIENCE = "Audience";
private static final String GROUPS = "groups";
private static final String GROUPS_CAPS = "Groups";
private static final String TARGETING_FILTER = "targetingFilter";
private static final String DEFAULT_ROLLOUT_PERCENTAGE = "defaultRolloutPercentage";
private static final String DEFAULT_ROLLOUT_PERCENTAGE_CAPS = "DefaultRolloutPercentage";
private static final ObjectMapper CASE_INSENSITIVE_MAPPER = new ObjectMapper()
.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true);
private final String context;
private final String label;
private final Map<String, Object> properties = new LinkedHashMap<>();
private final AppConfigurationProperties appConfigurationProperties;
private final HashMap<String, KeyVaultClient> keyVaultClients;
private final ClientStore clients;
private final KeyVaultCredentialProvider keyVaultCredentialProvider;
private final SecretClientBuilderSetup keyVaultClientProvider;
private final AppConfigurationProviderProperties appProperties;
private final ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
private static List<Object> convertToListOrEmptyList(Map<String, Object> parameters, String key) {
List<Object> listObjects = CASE_INSENSITIVE_MAPPER.convertValue(
parameters.get(key),
new TypeReference<List<Object>>() {
});
return listObjects == null ? emptyList() : listObjects;
}
@Override
public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[keySet.size()]);
}
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call {@code initFeatures} to update
* Feature Management, but make sure its done in the last {@code
* AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @return Updated Feature Set from Property Source
* @throws IOException Thrown when processing key/value failed when reading feature flags
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
List<ConfigurationSetting> features = new ArrayList<>();
if (configStore.getFeatureFlags().getEnabled()) {
settingSelector.setKeyFilter(configStore.getFeatureFlags().getKeyFilter())
.setLabelFilter(configStore.getFeatureFlags().getLabelFilter());
features = clients.listSettings(settingSelector, storeName);
if (features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
}
if (settings == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting instanceof SecretReferenceConfigurationSetting) {
String entry = getKeyVaultEntry((SecretReferenceConfigurationSetting) setting);
if (entry != null) {
properties.put(key, entry);
}
} else if (StringUtils.hasText(setting.getContentType())
&& JsonConfigurationParser.isJsonContentType(setting.getContentType())) {
HashMap<String, Object> jsonSettings = JsonConfigurationParser.parseJsonSetting(setting);
for (Entry<String, Object> jsonSetting : jsonSettings.entrySet()) {
key = jsonSetting.getKey().trim().substring(context.length());
properties.put(key, jsonSetting.getValue());
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its entry in Key Vault.
*
* @param value {"uri": "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(SecretReferenceConfigurationSetting secretReference) {
String secretValue = null;
try {
URI uri = null;
try {
uri = new URI(secretReference.getSecretId());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
if (!keyVaultClients.containsKey(uri.getHost())) {
KeyVaultClient client = new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider,
keyVaultClientProvider);
keyVaultClients.put(uri.getHost(), client);
}
KeyVaultSecret secret = keyVaultClients.get(uri.getHost()).getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retreiving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
/**
* Initializes Feature Management configurations. Only one {@code AppConfigurationPropertySource} can call this, and
* it needs to be done after the rest have run initProperties.
*
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
/**
 * Scans the given settings and adds every parsed feature flag to the given {@code FeatureSet}.
 *
 * @param featureSet accumulator the parsed features are added to; also the return value.
 * @param settings configuration settings to scan; only {@code FeatureFlagConfigurationSetting}s are used.
 * @param date NOTE(review): currently unused by this method — confirm whether it can be removed.
 * @return the same {@code featureSet} instance, with any parsed features added.
 * @throws IOException declared for the {@code createFeature} call — presumably thrown when a flag's
 * content cannot be parsed; confirm against createFeature's contract.
 */
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
    throws IOException {
    for (ConfigurationSetting setting : settings) {
        // Non-feature-flag settings are ignored here; they are handled in initProperties.
        if (setting instanceof FeatureFlagConfigurationSetting) {
            Object feature = createFeature((FeatureFlagConfigurationSetting) setting);
            if (feature != null) {
                // Feature is registered under its key with the feature flag prefix stripped.
                featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
            }
        }
    }
    return featureSet;
}
/**
 * Strips the feature flag prefix from a setting's key, yielding the feature's simple name.
 *
 * <p>NOTE(review): the Javadoc previously attached here described {@code createFeature} (it mentioned
 * {@code KeyValueItem} and an {@code IOException} this method cannot throw) and appears to have been
 * misattached. The {@code @SuppressWarnings("unchecked")} below also looks unnecessary for this
 * method — confirm and remove if so.</p>
 *
 * @param setting setting whose key is expected to begin with {@code FEATURE_FLAG_PREFIX}.
 * @return the trimmed key with the prefix removed.
 */
@SuppressWarnings("unchecked")
private String getFeatureSimpleName(ConfigurationSetting setting) {
    return setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
}
// Indexes each element of the list by its position rendered as a decimal string ("0", "1", ...).
private Map<String, Object> mapValuesByIndex(List<Object> users) {
    return IntStream.range(0, users.size())
        .boxed()
        .collect(toMap(index -> String.valueOf(index), index -> users.get(index)));
}
// Renames a parameter entry: stores value under newKey, then removes oldKey.
// Note the ordering: if oldKey equals newKey, the subsequent remove deletes the freshly-put entry.
private void switchKeyValues(Map<String, Object> parameters, String oldKey, String newKey, Object value) {
    parameters.put(newKey, value);
    parameters.remove(oldKey);
}
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private static final String USERS = "users";
private static final String USERS_CAPS = "Users";
private static final String AUDIENCE = "Audience";
private static final String GROUPS = "groups";
private static final String GROUPS_CAPS = "Groups";
private static final String TARGETING_FILTER = "targetingFilter";
private static final String DEFAULT_ROLLOUT_PERCENTAGE = "defaultRolloutPercentage";
private static final String DEFAULT_ROLLOUT_PERCENTAGE_CAPS = "DefaultRolloutPercentage";
private static final ObjectMapper CASE_INSENSITIVE_MAPPER = new ObjectMapper()
.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true);
private final String context;
private final String label;
private final Map<String, Object> properties = new LinkedHashMap<>();
private final AppConfigurationProperties appConfigurationProperties;
private final HashMap<String, KeyVaultClient> keyVaultClients;
private final ClientStore clients;
private final KeyVaultCredentialProvider keyVaultCredentialProvider;
private final SecretClientBuilderSetup keyVaultClientProvider;
private final AppConfigurationProviderProperties appProperties;
private final ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
private static List<Object> convertToListOrEmptyList(Map<String, Object> parameters, String key) {
List<Object> listObjects = CASE_INSENSITIVE_MAPPER.convertValue(
parameters.get(key),
new TypeReference<List<Object>>() {
});
return listObjects == null ? emptyList() : listObjects;
}
@Override
public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[keySet.size()]);
}
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call {@code initFeatures} to update
* Feature Management, but make sure its done in the last {@code
* AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @return Updated Feature Set from Property Source
* @throws IOException Thrown when processing key/value failed when reading feature flags
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
List<ConfigurationSetting> features = new ArrayList<>();
if (configStore.getFeatureFlags().getEnabled()) {
settingSelector.setKeyFilter(configStore.getFeatureFlags().getKeyFilter())
.setLabelFilter(configStore.getFeatureFlags().getLabelFilter());
features = clients.listSettings(settingSelector, storeName);
if (features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
}
if (settings == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting instanceof SecretReferenceConfigurationSetting) {
String entry = getKeyVaultEntry((SecretReferenceConfigurationSetting) setting);
if (entry != null) {
properties.put(key, entry);
}
} else if (StringUtils.hasText(setting.getContentType())
&& JsonConfigurationParser.isJsonContentType(setting.getContentType())) {
HashMap<String, Object> jsonSettings = JsonConfigurationParser.parseJsonSetting(setting);
for (Entry<String, Object> jsonSetting : jsonSettings.entrySet()) {
key = jsonSetting.getKey().trim().substring(context.length());
properties.put(key, jsonSetting.getValue());
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its entry in Key Vault.
*
* @param value {"uri": "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(SecretReferenceConfigurationSetting secretReference) {
String secretValue = null;
try {
URI uri = null;
try {
uri = new URI(secretReference.getSecretId());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
if (!keyVaultClients.containsKey(uri.getHost())) {
KeyVaultClient client = new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider,
keyVaultClientProvider);
keyVaultClients.put(uri.getHost(), client);
}
KeyVaultSecret secret = keyVaultClients.get(uri.getHost()).getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retreiving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
/**
* Initializes Feature Management configurations. Only one {@code AppConfigurationPropertySource} can call this, and
* it needs to be done after the rest have run initProperties.
*
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
if (setting instanceof FeatureFlagConfigurationSetting) {
Object feature = createFeature((FeatureFlagConfigurationSetting) setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into properties.
* @return Feature created from KeyValueItem
* @throws IOException - If a ConfigurationSetting isn't of the feature flag content type.
*/
@SuppressWarnings("unchecked")
private String getFeatureSimpleName(ConfigurationSetting setting) {
return setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
}
private Map<String, Object> mapValuesByIndex(List<Object> users) {
return IntStream.range(0, users.size()).boxed().collect(toMap(String::valueOf, users::get));
}
private void switchKeyValues(Map<String, Object> parameters, String oldKey, String newKey, Object value) {
parameters.put(newKey, value);
parameters.remove(oldKey);
}
} |
What is an error target? | public String getTarget() {
return target;
} | } | public String getTarget() {
return target;
} | class HttpResponseError {
@JsonProperty(value = "code", required = true)
private final String code;
@JsonProperty(value = "message", required = true)
private final String message;
@JsonProperty(value = "target")
private String target;
@JsonProperty(value = "innererror")
private HttpResponseInnerError innerError;
@JsonProperty(value = "details")
private List<HttpResponseError> errorDetails;
/**
* Creates an instance of {@link HttpResponseError}.
*
* @param code the error code of this error.
* @param message the error message of this error.
*/
@JsonCreator
public HttpResponseError(@JsonProperty(value = "code", required = true)String code,
@JsonProperty(value = "message", required = true)String message) {
this.code = code;
this.message = message;
}
/**
* Returns the error code of this error.
*
* @return the error code of this error.
*/
public String getCode() {
return code;
}
/**
* Returns the error message of this error.
*
* @return the error message of this error.
*/
public String getMessage() {
return message;
}
/**
 * Returns the target of this error.
 *
 * @return the target of this error.
 */
public String getTarget() {
    // Restored: the accessor was missing although its Javadoc remained, leaving 'target' write-only.
    return target;
}

/**
 * Sets the target of this error.
 *
 * @param target the target of this error.
 * @return the updated {@link HttpResponseError} instance.
 */
public HttpResponseError setTarget(String target) {
    this.target = target;
    return this;
}
/**
* Returns the inner error information for this error.
*
* @return the inner error for this error.
*/
public HttpResponseInnerError getInnerError() {
return innerError;
}
/**
* Sets the inner error information for this error.
* @param innerError the inner error for this error.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setInnerError(HttpResponseInnerError innerError) {
this.innerError = innerError;
return this;
}
/**
* Returns a list of details about specific errors that led to this reported error.
*
* @return the error details.
*/
public List<HttpResponseError> getErrorDetails() {
return errorDetails;
}
/**
* Sets a list of details about specific errors that led to this reported error.
*
* @param errorDetails the error details.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setErrorDetails(List<HttpResponseError> errorDetails) {
this.errorDetails = errorDetails;
return this;
}
} | class HttpResponseError {
@JsonProperty(value = "code", required = true)
private final String code;
@JsonProperty(value = "message", required = true)
private final String message;
@JsonProperty(value = "target")
private String target;
@JsonProperty(value = "innererror")
private HttpResponseInnerError innerError;
@JsonProperty(value = "details")
private List<HttpResponseError> errorDetails;
/**
* Creates an instance of {@link HttpResponseError}.
*
* @param code the error code of this error.
* @param message the error message of this error.
*/
@JsonCreator
public HttpResponseError(@JsonProperty(value = "code", required = true)String code,
@JsonProperty(value = "message", required = true)String message) {
this.code = code;
this.message = message;
}
/**
* Returns the error code of this error.
*
* @return the error code of this error.
*/
public String getCode() {
return code;
}
/**
* Returns the error message of this error.
*
* @return the error message of this error.
*/
public String getMessage() {
return message;
}
/**
 * Returns the target of this error.
 *
 * @return the target of this error.
 */
public String getTarget() {
    // Restored: the accessor was missing although its Javadoc remained, leaving 'target' write-only.
    return target;
}

/**
 * Sets the target of this error.
 *
 * @param target the target of this error.
 * @return the updated {@link HttpResponseError} instance.
 */
public HttpResponseError setTarget(String target) {
    this.target = target;
    return this;
}
/**
* Returns the inner error information for this error.
*
* @return the inner error for this error.
*/
public HttpResponseInnerError getInnerError() {
return innerError;
}
/**
* Sets the inner error information for this error.
* @param innerError the inner error for this error.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setInnerError(HttpResponseInnerError innerError) {
this.innerError = innerError;
return this;
}
/**
* Returns a list of details about specific errors that led to this reported error.
*
* @return the error details.
*/
public List<HttpResponseError> getErrorDetails() {
return errorDetails;
}
/**
* Sets a list of details about specific errors that led to this reported error.
*
* @param errorDetails the error details.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setErrorDetails(List<HttpResponseError> errorDetails) {
this.errorDetails = errorDetails;
return this;
}
} |
If there's a specific component in the service/request that caused the error, it would be the target. In the below sample, `password` field caused an error and is set as the target. ``` { "error": { "code": "BadArgument", "message": "Previous passwords may not be reused", "target": "password", "innererror": { "code": "PasswordError", ... ``` | public String getTarget() {
return target;
} | } | public String getTarget() {
return target;
} | class HttpResponseError {
@JsonProperty(value = "code", required = true)
private final String code;
@JsonProperty(value = "message", required = true)
private final String message;
@JsonProperty(value = "target")
private String target;
@JsonProperty(value = "innererror")
private HttpResponseInnerError innerError;
@JsonProperty(value = "details")
private List<HttpResponseError> errorDetails;
/**
* Creates an instance of {@link HttpResponseError}.
*
* @param code the error code of this error.
* @param message the error message of this error.
*/
@JsonCreator
public HttpResponseError(@JsonProperty(value = "code", required = true)String code,
@JsonProperty(value = "message", required = true)String message) {
this.code = code;
this.message = message;
}
/**
* Returns the error code of this error.
*
* @return the error code of this error.
*/
public String getCode() {
return code;
}
/**
* Returns the error message of this error.
*
* @return the error message of this error.
*/
public String getMessage() {
return message;
}
/**
 * Returns the target of this error.
 *
 * @return the target of this error.
 */
public String getTarget() {
    // Restored: the accessor was missing although its Javadoc remained, leaving 'target' write-only.
    return target;
}

/**
 * Sets the target of this error.
 *
 * @param target the target of this error.
 * @return the updated {@link HttpResponseError} instance.
 */
public HttpResponseError setTarget(String target) {
    this.target = target;
    return this;
}
/**
* Returns the inner error information for this error.
*
* @return the inner error for this error.
*/
public HttpResponseInnerError getInnerError() {
return innerError;
}
/**
* Sets the inner error information for this error.
* @param innerError the inner error for this error.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setInnerError(HttpResponseInnerError innerError) {
this.innerError = innerError;
return this;
}
/**
* Returns a list of details about specific errors that led to this reported error.
*
* @return the error details.
*/
public List<HttpResponseError> getErrorDetails() {
return errorDetails;
}
/**
* Sets a list of details about specific errors that led to this reported error.
*
* @param errorDetails the error details.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setErrorDetails(List<HttpResponseError> errorDetails) {
this.errorDetails = errorDetails;
return this;
}
} | class HttpResponseError {
@JsonProperty(value = "code", required = true)
private final String code;
@JsonProperty(value = "message", required = true)
private final String message;
@JsonProperty(value = "target")
private String target;
@JsonProperty(value = "innererror")
private HttpResponseInnerError innerError;
@JsonProperty(value = "details")
private List<HttpResponseError> errorDetails;
/**
* Creates an instance of {@link HttpResponseError}.
*
* @param code the error code of this error.
* @param message the error message of this error.
*/
@JsonCreator
public HttpResponseError(@JsonProperty(value = "code", required = true)String code,
@JsonProperty(value = "message", required = true)String message) {
this.code = code;
this.message = message;
}
/**
* Returns the error code of this error.
*
* @return the error code of this error.
*/
public String getCode() {
return code;
}
/**
* Returns the error message of this error.
*
* @return the error message of this error.
*/
public String getMessage() {
return message;
}
/**
 * Returns the target of this error.
 *
 * @return the target of this error.
 */
public String getTarget() {
    // Restored: the accessor was missing although its Javadoc remained, leaving 'target' write-only.
    return target;
}

/**
 * Sets the target of this error.
 *
 * @param target the target of this error.
 * @return the updated {@link HttpResponseError} instance.
 */
public HttpResponseError setTarget(String target) {
    this.target = target;
    return this;
}
/**
* Returns the inner error information for this error.
*
* @return the inner error for this error.
*/
public HttpResponseInnerError getInnerError() {
return innerError;
}
/**
* Sets the inner error information for this error.
* @param innerError the inner error for this error.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setInnerError(HttpResponseInnerError innerError) {
this.innerError = innerError;
return this;
}
/**
* Returns a list of details about specific errors that led to this reported error.
*
* @return the error details.
*/
public List<HttpResponseError> getErrorDetails() {
return errorDetails;
}
/**
* Sets a list of details about specific errors that led to this reported error.
*
* @param errorDetails the error details.
* @return the updated {@link HttpResponseError} instance.
*/
public HttpResponseError setErrorDetails(List<HttpResponseError> errorDetails) {
this.errorDetails = errorDetails;
return this;
}
} |
You can override the `beforeTest()` method defined in `TestBase` to initialize the builder, instead of calling this from each test method. | public void registerAndGetSchema() {
initializeBuilder();
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
final String contents = new String(response.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client2.getSchema(schemaIdToGet))
.assertNext(schema -> {
assertEquals(schemaIdToGet, schema.getSchemaId());
assertEquals(SerializationType.AVRO, schema.getSerializationType());
final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
})
.verifyComplete();
} | initializeBuilder(); | public void registerAndGetSchema() {
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
final String contents = new String(response.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client2.getSchema(schemaIdToGet))
.assertNext(schema -> {
assertEquals(schemaIdToGet, schema.getSchemaId());
assertEquals(SerializationType.AVRO, schema.getSerializationType());
final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
})
.verifyComplete();
} | class SchemaRegistryAsyncClientTests extends TestBase {
private static final int RESOURCE_LENGTH = 16;
private static final String AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME = "AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME";
private static final String SCHEMA_CONTENT = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" }]}";
private static final String SCHEMA_GROUP = "at";
private static final Pattern WHITESPACE_PATTERN = Pattern.compile("\\s+", Pattern.MULTILINE);
private static final String SCHEMA_CONTENT_NO_WHITESPACE = WHITESPACE_PATTERN.matcher(SCHEMA_CONTENT).replaceAll("");
SchemaRegistryClientBuilder builder;
@Override
protected void afterTest() {
Mockito.framework().clearInlineMocks();
super.afterTest();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
/**
* Verifies that we can register a schema and then get it by its schemaId. Then add another version of it, and get
* that version.
*/
@Test
// Registers two versions of the same schema, asserts distinct ids are issued, then fetches the
// second id and asserts the modified content round-trips (whitespace-normalized for comparison).
public void registerAndGetSchemaTwice() {
    initializeBuilder();
    final String schemaContentModified = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" },{ \"name\" : \"Sign\", \"type\" : \"string\" }]}";
    final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
    final AtomicReference<String> schemaId = new AtomicReference<>();
    final AtomicReference<String> schemaId2 = new AtomicReference<>();
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
        .assertNext(response -> {
            assertEquals(schemaName, response.getSchemaName());
            assertNotNull(response.getSchemaId());
            schemaId.set(response.getSchemaId());
        }).verifyComplete();
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, schemaContentModified, SerializationType.AVRO))
        .assertNext(response -> {
            assertEquals(schemaName, response.getSchemaName());
            assertNotNull(response.getSchemaId());
            schemaId2.set(response.getSchemaId());
        }).verifyComplete();
    // Registering changed content must yield a new schema id.
    assertNotEquals(schemaId.get(), schemaId2.get());
    final String schemaToGet = schemaId2.get();
    StepVerifier.create(client2.getSchema(schemaId2.get()))
        .assertNext(schema -> {
            assertEquals(schemaToGet, schema.getSchemaId());
            assertEquals(SerializationType.AVRO, schema.getSerializationType());
            final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
            // Whitespace differences are not significant; compare normalized forms.
            final String expected = WHITESPACE_PATTERN.matcher(schemaContentModified).replaceAll("");
            final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
            assertEquals(expected, actualContents);
        })
        .verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schema group, name, and content.
*/
@Test
// Registers a schema, then resolves the same (group, name, content, type) back to the issued id.
public void registerAndGetSchemaId() {
    initializeBuilder();
    final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
    final AtomicReference<String> schemaId = new AtomicReference<>();
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
        .assertNext(response -> {
            assertEquals(schemaName, response.getSchemaName());
            assertNotNull(response.getSchemaId());
            schemaId.set(response.getSchemaId());
        }).verifyComplete();
    final String schemaIdToGet = schemaId.get();
    assertNotNull(schemaIdToGet);
    // A second, independent client must resolve the identical schema to the same id.
    StepVerifier.create(client2.getSchemaId(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
        .assertNext(schema -> assertEquals(schemaIdToGet, schema))
        .verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
// Registers syntactically invalid Avro content and asserts the service rejects it with HTTP 400.
public void registerBadRequest() {
    initializeBuilder();
    // Invalid: the "Age" field has no type, and the whole document is wrapped in extra quotes.
    final String invalidContent = "\"{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\" }]}\"";
    final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, invalidContent, SerializationType.AVRO))
        .expectErrorSatisfies(error -> {
            assertTrue(error instanceof ServiceErrorResponseException);
            final ServiceErrorResponseException exception = (ServiceErrorResponseException) error;
            assertEquals(400, exception.getResponse().getStatusCode());
        }).verify();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
// Registers a schema, then fetches it back through the SAME client. Note the exact (non-normalized)
// content comparison — presumably because the same client serves it from its cache; confirm.
public void registerAndGetCachedSchema() {
    initializeBuilder();
    final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    final AtomicReference<String> schemaId = new AtomicReference<>();
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
        .assertNext(response -> {
            assertEquals(schemaName, response.getSchemaName());
            assertNotNull(response.getSchemaId());
            schemaId.set(response.getSchemaId());
            final String contents = new String(response.getSchema(), StandardCharsets.UTF_8);
            assertEquals(SCHEMA_CONTENT, contents);
        }).verifyComplete();
    final String schemaIdToGet = schemaId.get();
    assertNotNull(schemaIdToGet);
    StepVerifier.create(client1.getSchema(schemaIdToGet))
        .assertNext(schema -> {
            assertEquals(schemaIdToGet, schema.getSchemaId());
            assertEquals(SerializationType.AVRO, schema.getSerializationType());
            final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
            assertEquals(SCHEMA_CONTENT, contents);
        })
        .verifyComplete();
}
/**
* Verifies that we get 404 when non-existent schema returned.
*/
@Test
// Fetching a schema by an id that was never registered must fail with HTTP 404.
public void getSchemaDoesNotExist() {
    initializeBuilder();
    // Arbitrary well-formed GUID that is not expected to exist in the registry.
    final String schemaId = "59f112cf-ff02-40e6-aca9-0d30ed7f7f94";
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    StepVerifier.create(client1.getSchema(schemaId))
        .expectErrorSatisfies(error -> {
            assertTrue(error instanceof ResourceNotFoundException);
            assertEquals(404, ((ResourceNotFoundException)error).getResponse().getStatusCode());
        })
        .verify();
}
/**
* Verifies that we get 404 when non-existent schema query is returned.
*/
@Test
// Querying the id of a (group, name, content) tuple that was never registered must fail with HTTP 404.
public void getSchemaIdDoesNotExist() {
    initializeBuilder();
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    StepVerifier.create(client1.getSchemaId("at", "bar", SCHEMA_CONTENT, SerializationType.AVRO))
        .expectErrorSatisfies(error -> {
            assertTrue(error instanceof ResourceNotFoundException);
            assertEquals(404, ((ResourceNotFoundException)error).getResponse().getStatusCode());
        })
        .verify();
}
// Builds the SchemaRegistryClientBuilder used by the tests. In playback mode a mocked credential and
// the recorded HTTP client are used; in LIVE/RECORD mode a real credential and endpoint are required.
void initializeBuilder() {
    final String endpoint;
    TokenCredential tokenCredential;
    if (interceptorManager.isPlaybackMode()) {
        // No real AAD round-trip during playback; return a long-lived dummy token instead.
        tokenCredential = mock(TokenCredential.class);
        when(tokenCredential.getToken(any(TokenRequestContext.class)))
            .thenReturn(Mono.fromCallable(() -> {
                return new AccessToken("foo", OffsetDateTime.now().plusMinutes(20));
            }));
        // NOTE(review): this literal appears truncated in this view — confirm the full playback URL.
        endpoint = "https:
    } else {
        tokenCredential = new DefaultAzureCredentialBuilder().build();
        endpoint = System.getenv(AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME);
        assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
    }
    builder = new SchemaRegistryClientBuilder()
        .credential(tokenCredential)
        .endpoint(endpoint);
    if (interceptorManager.isPlaybackMode()) {
        builder.httpClient(interceptorManager.getPlaybackClient());
    } else {
        // Record mode: retry transient failures and capture traffic for later playback.
        builder.addPolicy(new RetryPolicy())
            .addPolicy(interceptorManager.getRecordPolicy());
    }
}
} | class SchemaRegistryAsyncClientTests extends TestBase {
static final int RESOURCE_LENGTH = 16;
static final String AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME = "AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME";
static final String SCHEMA_CONTENT = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" }]}";
static final String SCHEMA_GROUP = "at";
static final Pattern WHITESPACE_PATTERN = Pattern.compile("\\s+", Pattern.MULTILINE);
static final String SCHEMA_CONTENT_NO_WHITESPACE = WHITESPACE_PATTERN.matcher(SCHEMA_CONTENT).replaceAll("");
private SchemaRegistryClientBuilder builder;
@Override
protected void beforeTest() {
final String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
tokenCredential = mock(TokenCredential.class);
when(tokenCredential.getToken(any(TokenRequestContext.class))).thenAnswer(invocationOnMock -> {
return Mono.fromCallable(() -> {
return new AccessToken("foo", OffsetDateTime.now().plusMinutes(20));
});
});
endpoint = "https:
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME);
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else {
builder.addPolicy(new RetryPolicy())
.addPolicy(interceptorManager.getRecordPolicy());
}
}
@Override
protected void afterTest() {
Mockito.framework().clearInlineMocks();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
/**
* Verifies that we can register a schema and then get it by its schemaId. Then add another version of it, and get
* that version.
*/
@Test
public void registerAndGetSchemaTwice() {
    // Second version of the schema: same record name, one extra field ("Sign").
    final String schemaContentModified = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" },{ \"name\" : \"Sign\", \"type\" : \"string\" }]}";
    final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
    final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
    final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
    final AtomicReference<String> schemaId = new AtomicReference<>();
    final AtomicReference<String> schemaId2 = new AtomicReference<>();

    // Register the first version of the schema.
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
        .assertNext(response -> {
            assertSchemaProperties(response, null, schemaName, SCHEMA_CONTENT);
            schemaId.set(response.getSchemaId());
        }).verifyComplete();

    // Register the modified version under the same group/name.
    StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, schemaContentModified, SerializationType.AVRO))
        .assertNext(response -> {
            assertSchemaProperties(response, null, schemaName, schemaContentModified);
            schemaId2.set(response.getSchemaId());
        }).verifyComplete();

    // Each version must be assigned its own schema id.
    assertNotEquals(schemaId.get(), schemaId2.get());

    // Fetching the second id must return the MODIFIED contents, not the first version.
    // (Previously this compared against SCHEMA_CONTENT, which could never match schemaId2.)
    final String schemaIdToGet = schemaId2.get();
    StepVerifier.create(client2.getSchema(schemaIdToGet))
        .assertNext(schema -> assertSchemaProperties(schema, schemaIdToGet, schemaName, schemaContentModified))
        .verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schema group, name, and content.
*/
@Test
public void registerAndGetSchemaId() {
// Register with one client, then resolve the id via a second client so the lookup cannot be
// satisfied by client1's local cache.
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertSchemaProperties(response, null, schemaName, SCHEMA_CONTENT);
schemaId.set(response.getSchemaId());
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
// Resolving by (group, name, content, type) must yield the id assigned at registration.
StepVerifier.create(client2.getSchemaId(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(schema -> assertEquals(schemaIdToGet, schema))
.verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
public void registerBadRequest() {
// Invalid Avro: the "Age" field is missing its type, so the service should reject it with 400.
final String invalidContent = "\"{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\" }]}\"";
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, invalidContent, SerializationType.AVRO))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ServiceErrorResponseException);
final ServiceErrorResponseException exception = (ServiceErrorResponseException) error;
assertEquals(400, exception.getResponse().getStatusCode());
}).verify();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
public void registerAndGetCachedSchema() {
// Register and fetch with the SAME client instance; the fetch is presumably served from the
// client-side cache — the test can only observe the returned properties, not the cache itself.
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertSchemaProperties(response, null, schemaName, SCHEMA_CONTENT);
schemaId.set(response.getSchemaId());
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client1.getSchema(schemaIdToGet))
.assertNext(schema -> assertSchemaProperties(schema, schemaIdToGet, schemaName, SCHEMA_CONTENT))
.verifyComplete();
}
/**
* Verifies that we get 404 when non-existent schema returned.
*/
@Test
public void getSchemaDoesNotExist() {
// A well-formed but unregistered schema id must surface as a 404 ResourceNotFoundException.
final String schemaId = "59f112cf-ff02-40e6-aca9-0d30ed7f7f94";
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.getSchema(schemaId))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ResourceNotFoundException);
assertEquals(404, ((ResourceNotFoundException) error).getResponse().getStatusCode());
})
.verify();
}
/**
* Verifies that we get 404 when non-existent schema query is returned.
*/
@Test
public void getSchemaIdDoesNotExist() {
// Querying the id of a schema name that was never registered must surface as a 404.
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.getSchemaId("at", "bar", SCHEMA_CONTENT, SerializationType.AVRO))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ResourceNotFoundException);
assertEquals(404, ((ResourceNotFoundException) error).getResponse().getStatusCode());
})
.verify();
}
/**
 * Asserts the common properties of a {@link SchemaProperties} returned by the service.
 *
 * @param actual Schema properties returned by the service.
 * @param expectedSchemaId Expected schema id, or {@code null} to only assert that an id is present.
 * @param expectedSchemaName Expected schema name.
 * @param expectedContents Expected schema document; compared ignoring whitespace.
 */
static void assertSchemaProperties(SchemaProperties actual, String expectedSchemaId, String expectedSchemaName,
    String expectedContents) {
    // Guard against a test-authoring bug rather than a service bug.
    // (Was assertNotEquals(expectedContents, "..."), a meaningless comparison.)
    assertNotNull(expectedContents, "'expectedContents' should not be null.");
    assertEquals(expectedSchemaName, actual.getSchemaName());
    assertEquals(SerializationType.AVRO, actual.getSerializationType());
    assertNotNull(actual.getSchemaId());
    if (expectedSchemaId != null) {
        assertEquals(expectedSchemaId, actual.getSchemaId());
    }
    // Compare contents ignoring whitespace. The expected side must be normalized from
    // expectedContents — normalizing actualContents again made the assertion a tautology.
    final String contents = new String(actual.getSchema(), StandardCharsets.UTF_8);
    final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
    final String expectedContentsNoWhitespace = WHITESPACE_PATTERN.matcher(expectedContents).replaceAll("");
    assertEquals(expectedContentsNoWhitespace, actualContents);
}
} |
It used to be a parameterised test that would take a different HttpClient because I thought we wanted to test different variations of the client. https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationClientTest.java#L91 | public void registerAndGetSchema() {
initializeBuilder();
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
final String contents = new String(response.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client2.getSchema(schemaIdToGet))
.assertNext(schema -> {
assertEquals(schemaIdToGet, schema.getSchemaId());
assertEquals(SerializationType.AVRO, schema.getSerializationType());
final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
})
.verifyComplete();
} | initializeBuilder(); | public void registerAndGetSchema() {
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
final String contents = new String(response.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client2.getSchema(schemaIdToGet))
.assertNext(schema -> {
assertEquals(schemaIdToGet, schema.getSchemaId());
assertEquals(SerializationType.AVRO, schema.getSerializationType());
final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(SCHEMA_CONTENT_NO_WHITESPACE, actualContents);
})
.verifyComplete();
} | class SchemaRegistryAsyncClientTests extends TestBase {
private static final int RESOURCE_LENGTH = 16;
private static final String AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME = "AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME";
private static final String SCHEMA_CONTENT = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" }]}";
private static final String SCHEMA_GROUP = "at";
private static final Pattern WHITESPACE_PATTERN = Pattern.compile("\\s+", Pattern.MULTILINE);
private static final String SCHEMA_CONTENT_NO_WHITESPACE = WHITESPACE_PATTERN.matcher(SCHEMA_CONTENT).replaceAll("");
SchemaRegistryClientBuilder builder;
@Override
protected void afterTest() {
Mockito.framework().clearInlineMocks();
super.afterTest();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
/**
* Verifies that we can register a schema and then get it by its schemaId. Then add another version of it, and get
* that version.
*/
@Test
public void registerAndGetSchemaTwice() {
initializeBuilder();
final String schemaContentModified = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" },{ \"name\" : \"Sign\", \"type\" : \"string\" }]}";
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
final AtomicReference<String> schemaId2 = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
}).verifyComplete();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, schemaContentModified, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId2.set(response.getSchemaId());
}).verifyComplete();
assertNotEquals(schemaId.get(), schemaId2.get());
final String schemaToGet = schemaId2.get();
StepVerifier.create(client2.getSchema(schemaId2.get()))
.assertNext(schema -> {
assertEquals(schemaToGet, schema.getSchemaId());
assertEquals(SerializationType.AVRO, schema.getSerializationType());
final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
final String expected = WHITESPACE_PATTERN.matcher(schemaContentModified).replaceAll("");
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
assertEquals(expected, actualContents);
})
.verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schema group, name, and content.
*/
@Test
public void registerAndGetSchemaId() {
initializeBuilder();
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client2.getSchemaId(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(schema -> assertEquals(schemaIdToGet, schema))
.verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
public void registerBadRequest() {
initializeBuilder();
final String invalidContent = "\"{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\" }]}\"";
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, invalidContent, SerializationType.AVRO))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ServiceErrorResponseException);
final ServiceErrorResponseException exception = (ServiceErrorResponseException) error;
assertEquals(400, exception.getResponse().getStatusCode());
}).verify();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
public void registerAndGetCachedSchema() {
initializeBuilder();
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertEquals(schemaName, response.getSchemaName());
assertNotNull(response.getSchemaId());
schemaId.set(response.getSchemaId());
final String contents = new String(response.getSchema(), StandardCharsets.UTF_8);
assertEquals(SCHEMA_CONTENT, contents);
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client1.getSchema(schemaIdToGet))
.assertNext(schema -> {
assertEquals(schemaIdToGet, schema.getSchemaId());
assertEquals(SerializationType.AVRO, schema.getSerializationType());
final String contents = new String(schema.getSchema(), StandardCharsets.UTF_8);
assertEquals(SCHEMA_CONTENT, contents);
})
.verifyComplete();
}
/**
* Verifies that we get 404 when non-existent schema returned.
*/
@Test
public void getSchemaDoesNotExist() {
initializeBuilder();
final String schemaId = "59f112cf-ff02-40e6-aca9-0d30ed7f7f94";
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.getSchema(schemaId))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ResourceNotFoundException);
assertEquals(404, ((ResourceNotFoundException)error).getResponse().getStatusCode());
})
.verify();
}
/**
* Verifies that we get 404 when non-existent schema query is returned.
*/
@Test
public void getSchemaIdDoesNotExist() {
initializeBuilder();
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.getSchemaId("at", "bar", SCHEMA_CONTENT, SerializationType.AVRO))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ResourceNotFoundException);
assertEquals(404, ((ResourceNotFoundException)error).getResponse().getStatusCode());
})
.verify();
}
// Builds the shared SchemaRegistryClientBuilder for the current test mode. Playback mode mocks
// the credential and replays recorded HTTP traffic; LIVE/RECORD modes authenticate for real and
// record the calls.
void initializeBuilder() {
final String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
// Dummy token — no AAD round-trip happens in playback.
tokenCredential = mock(TokenCredential.class);
when(tokenCredential.getToken(any(TokenRequestContext.class)))
.thenReturn(Mono.fromCallable(() -> {
return new AccessToken("foo", OffsetDateTime.now().plusMinutes(20));
}));
// NOTE(review): endpoint literal appears truncated here — confirm against the full source.
endpoint = "https:
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME);
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else {
builder.addPolicy(new RetryPolicy())
.addPolicy(interceptorManager.getRecordPolicy());
}
}
} | class SchemaRegistryAsyncClientTests extends TestBase {
static final int RESOURCE_LENGTH = 16;
static final String AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME = "AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME";
static final String SCHEMA_CONTENT = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" }]}";
static final String SCHEMA_GROUP = "at";
static final Pattern WHITESPACE_PATTERN = Pattern.compile("\\s+", Pattern.MULTILINE);
static final String SCHEMA_CONTENT_NO_WHITESPACE = WHITESPACE_PATTERN.matcher(SCHEMA_CONTENT).replaceAll("");
private SchemaRegistryClientBuilder builder;
@Override
protected void beforeTest() {
final String endpoint;
TokenCredential tokenCredential;
if (interceptorManager.isPlaybackMode()) {
tokenCredential = mock(TokenCredential.class);
when(tokenCredential.getToken(any(TokenRequestContext.class))).thenAnswer(invocationOnMock -> {
return Mono.fromCallable(() -> {
return new AccessToken("foo", OffsetDateTime.now().plusMinutes(20));
});
});
endpoint = "https:
} else {
tokenCredential = new DefaultAzureCredentialBuilder().build();
endpoint = System.getenv(AZURE_EVENTHUBS_FULLY_QUALIFIED_DOMAIN_NAME);
assertNotNull(endpoint, "'endpoint' cannot be null in LIVE/RECORD mode.");
}
builder = new SchemaRegistryClientBuilder()
.credential(tokenCredential)
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
} else {
builder.addPolicy(new RetryPolicy())
.addPolicy(interceptorManager.getRecordPolicy());
}
}
@Override
protected void afterTest() {
Mockito.framework().clearInlineMocks();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
/**
* Verifies that we can register a schema and then get it by its schemaId. Then add another version of it, and get
* that version.
*/
@Test
public void registerAndGetSchemaTwice() {
final String schemaContentModified = "{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\", \"type\" : \"int\" },{ \"name\" : \"Sign\", \"type\" : \"string\" }]}";
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
final AtomicReference<String> schemaId2 = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertSchemaProperties(response, null, schemaName, SCHEMA_CONTENT);
schemaId.set(response.getSchemaId());
}).verifyComplete();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, schemaContentModified, SerializationType.AVRO))
.assertNext(response -> {
assertSchemaProperties(response, null, schemaName, schemaContentModified);
schemaId2.set(response.getSchemaId());
}).verifyComplete();
assertNotEquals(schemaId.get(), schemaId2.get());
final String schemaIdToGet = schemaId2.get();
StepVerifier.create(client2.getSchema(schemaIdToGet))
.assertNext(schema -> assertSchemaProperties(schema, schemaIdToGet, schemaName, SCHEMA_CONTENT))
.verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schema group, name, and content.
*/
@Test
public void registerAndGetSchemaId() {
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final SchemaRegistryAsyncClient client2 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertSchemaProperties(response, null, schemaName, SCHEMA_CONTENT);
schemaId.set(response.getSchemaId());
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client2.getSchemaId(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(schema -> assertEquals(schemaIdToGet, schema))
.verifyComplete();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
public void registerBadRequest() {
final String invalidContent = "\"{\"type\" : \"record\",\"namespace\" : \"TestSchema\",\"name\" : \"Employee\",\"fields\" : [{ \"name\" : \"Name\" , \"type\" : \"string\" },{ \"name\" : \"Age\" }]}\"";
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, invalidContent, SerializationType.AVRO))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ServiceErrorResponseException);
final ServiceErrorResponseException exception = (ServiceErrorResponseException) error;
assertEquals(400, exception.getResponse().getStatusCode());
}).verify();
}
/**
* Verifies that we can register a schema and then get it by its schemaId.
*/
@Test
public void registerAndGetCachedSchema() {
final String schemaName = testResourceNamer.randomName("sch", RESOURCE_LENGTH);
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
final AtomicReference<String> schemaId = new AtomicReference<>();
StepVerifier.create(client1.registerSchema(SCHEMA_GROUP, schemaName, SCHEMA_CONTENT, SerializationType.AVRO))
.assertNext(response -> {
assertSchemaProperties(response, null, schemaName, SCHEMA_CONTENT);
schemaId.set(response.getSchemaId());
}).verifyComplete();
final String schemaIdToGet = schemaId.get();
assertNotNull(schemaIdToGet);
StepVerifier.create(client1.getSchema(schemaIdToGet))
.assertNext(schema -> assertSchemaProperties(schema, schemaIdToGet, schemaName, SCHEMA_CONTENT))
.verifyComplete();
}
/**
* Verifies that we get 404 when non-existent schema returned.
*/
@Test
public void getSchemaDoesNotExist() {
final String schemaId = "59f112cf-ff02-40e6-aca9-0d30ed7f7f94";
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.getSchema(schemaId))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ResourceNotFoundException);
assertEquals(404, ((ResourceNotFoundException) error).getResponse().getStatusCode());
})
.verify();
}
/**
* Verifies that we get 404 when non-existent schema query is returned.
*/
@Test
public void getSchemaIdDoesNotExist() {
final SchemaRegistryAsyncClient client1 = builder.buildAsyncClient();
StepVerifier.create(client1.getSchemaId("at", "bar", SCHEMA_CONTENT, SerializationType.AVRO))
.expectErrorSatisfies(error -> {
assertTrue(error instanceof ResourceNotFoundException);
assertEquals(404, ((ResourceNotFoundException) error).getResponse().getStatusCode());
})
.verify();
}
static void assertSchemaProperties(SchemaProperties actual, String expectedSchemaId, String expectedSchemaName,
String expectedContents) {
assertNotEquals(expectedContents, "'expectedContents' should not be null.");
assertEquals(expectedSchemaName, actual.getSchemaName());
assertEquals(SerializationType.AVRO, actual.getSerializationType());
assertNotNull(actual.getSchemaId());
if (expectedSchemaId != null) {
assertEquals(expectedSchemaId, actual.getSchemaId());
}
final String contents = new String(actual.getSchema(), StandardCharsets.UTF_8);
final String actualContents = WHITESPACE_PATTERN.matcher(contents).replaceAll("");
final String expectedContentsNoWhitespace = WHITESPACE_PATTERN.matcher(actualContents).replaceAll("");
assertEquals(expectedContentsNoWhitespace, actualContents);
}
} |
What happens service side if it receives an empty `clientFilters` list? The other option would be to defer this instantiation and only perform it when `addFilter` is called and `clientFilters == null` | public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
clientFilters = new ArrayList<>();
} | clientFilters = new ArrayList<>(); | public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) {
this.featureId = featureId;
this.isEnabled = isEnabled;
super.setKey(KEY_PREFIX + featureId);
super.setContentType(FEATURE_FLAG_CONTENT_TYPE);
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
// Store the raw JSON first, then re-parse it and mirror every parsed field back onto this
// instance so the typed accessors stay consistent with the raw value.
super.setValue(value);
final FeatureFlagConfigurationSetting updatedSetting = readFeatureFlagConfigurationSettingValue(value);
this.featureId = updatedSetting.getFeatureId();
this.description = updatedSetting.getDescription();
this.isEnabled = updatedSetting.isEnabled();
this.displayName = updatedSetting.getDisplayName();
// Copy the parsed filter list so this setting does not share a mutable list with the
// intermediate parse result. A value without filters leaves clientFilters null.
if (updatedSetting.getClientFilters() != null) {
this.clientFilters = StreamSupport.stream(updatedSetting.getClientFilters().spliterator(), false)
.collect(Collectors.toList());
} else {
this.clientFilters = null;
}
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
this.featureId = featureId;
super.setKey(KEY_PREFIX + featureId);
updateSettingValue();
return this;
}
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
this.isEnabled = isEnabled;
updateSettingValue();
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
this.description = description;
updateSettingValue();
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
this.displayName = displayName;
updateSettingValue();
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
this.clientFilters = clientFilters;
updateSettingValue();
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
    // Guard against the filter list never having been initialized; without this,
    // the first add on a freshly-constructed setting throws NullPointerException.
    if (clientFilters == null) {
        clientFilters = new java.util.ArrayList<>();
    }
    clientFilters.add(clientFilter);
    updateSettingValue();
    return this;
}
// Re-serializes this setting's strongly-typed state into the underlying JSON value.
// The public setters document "@throws IllegalArgumentException if the setting's
// value is an invalid JSON format", so the logged exception must actually be
// thrown: ClientLogger.logExceptionAsError returns the exception for that purpose.
private void updateSettingValue() {
    try {
        super.setValue(writeFeatureFlagConfigurationSetting(this));
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Can't parse Feature Flag configuration setting value.", exception));
    }
}
} | class FeatureFlagConfigurationSetting extends ConfigurationSetting {
private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class);
private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8";
private String featureId;
private boolean isEnabled;
private String description;
private String displayName;
private List<FeatureFlagFilter> clientFilters;
/**
* A prefix is used to construct a feature flag configuration setting's key.
*/
public static final String KEY_PREFIX = ".appconfig.featureflag/";
/**
* The constructor for a feature flag configuration setting.
*
* @param featureId A feature flag identification value that used to construct in setting's key. The key of setting
* is {@code KEY_PREFIX} concatenate {@code featureId}.
* @param isEnabled A boolean value to turn on/off the feature flag setting.
*/
/**
* Sets the key of this setting.
*
* @param key The key to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setKey(String key) {
super.setKey(key);
return this;
}
/**
* Sets the value of this setting.
*
* @param value The value to associate with this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
@Override
public FeatureFlagConfigurationSetting setValue(String value) {
super.setValue(value);
final FeatureFlagConfigurationSetting updatedSetting = readFeatureFlagConfigurationSettingValue(value);
this.featureId = updatedSetting.getFeatureId();
this.description = updatedSetting.getDescription();
this.isEnabled = updatedSetting.isEnabled();
this.displayName = updatedSetting.getDisplayName();
this.clientFilters = StreamSupport.stream(updatedSetting.getClientFilters().spliterator(), false)
.collect(Collectors.toList());
return this;
}
/**
* Sets the label of this configuration setting. {@link
* set.
*
* @param label The label of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setLabel(String label) {
super.setLabel(label);
return this;
}
/**
* Sets the content type. By default, the content type is null.
*
* @param contentType The content type of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setContentType(String contentType) {
super.setContentType(contentType);
return this;
}
/**
* Sets the ETag for this configuration setting.
*
* @param etag The ETag for the configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setETag(String etag) {
super.setETag(etag);
return this;
}
/**
* Sets the tags for this configuration setting.
*
* @param tags The tags to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
@Override
public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) {
super.setTags(tags);
return this;
}
/**
* Get the feature ID of this configuration setting.
*
* @return the feature ID of this configuration setting.
*/
public String getFeatureId() {
return featureId;
}
/**
* Set the feature ID of this configuration setting.
*
* @param featureId the feature ID of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
    public FeatureFlagConfigurationSetting setFeatureId(String featureId) {
        // The setting's key is always derived from the feature id
        // (KEY_PREFIX + featureId), so the key and the serialized JSON value
        // must be refreshed together whenever the id changes.
        this.featureId = featureId;
        super.setKey(KEY_PREFIX + featureId);
        updateSettingValue();
        return this;
    }
/**
* Get the boolean indicator to show if the setting is turn on or off.
*
* @return the boolean indicator to show if the setting is turn on or off.
*/
public boolean isEnabled() {
return this.isEnabled;
}
/**
* Set the boolean indicator to show if the setting is turn on or off.
*
* @param isEnabled the boolean indicator to show if the setting is turn on or off.
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) {
this.isEnabled = isEnabled;
updateSettingValue();
return this;
}
/**
* Get the description of this configuration setting.
*
* @return the description of this configuration setting.
*/
public String getDescription() {
return description;
}
/**
* Set the description of this configuration setting.
*
* @param description the description of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDescription(String description) {
this.description = description;
updateSettingValue();
return this;
}
/**
* Get the display name of this configuration setting.
*
* @return the display name of this configuration setting.
*/
public String getDisplayName() {
return displayName;
}
/**
* Set the display name of this configuration setting.
*
* @param displayName the display name of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setDisplayName(String displayName) {
this.displayName = displayName;
updateSettingValue();
return this;
}
/**
* Gets the feature flag filters of this configuration setting.
*
* @return the feature flag filters of this configuration setting.
*/
public List<FeatureFlagFilter> getClientFilters() {
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
return clientFilters;
}
/**
* Sets the feature flag filters of this configuration setting.
*
* @param clientFilters the feature flag filters of this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
* @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format.
*/
public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) {
this.clientFilters = clientFilters;
updateSettingValue();
return this;
}
/**
* Add a feature flag filter to this configuration setting.
*
* @param clientFilter a feature flag filter to add to this configuration setting.
*
* @return The updated {@link FeatureFlagConfigurationSetting} object.
*/
public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) {
if (clientFilters == null) {
clientFilters = new ArrayList<>();
}
clientFilters.add(clientFilter);
updateSettingValue();
return this;
}
private void updateSettingValue() {
try {
super.setValue(writeFeatureFlagConfigurationSetting(this));
} catch (IOException exception) {
LOGGER.logExceptionAsError(new IllegalArgumentException(
"Can't parse Feature Flag configuration setting value.", exception));
}
}
} |
Could also change this so that the check is not order dependent. | void listEntitiesWithFilter() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
tableClient.listEntities(options, null, null).forEach(tableEntity -> {
assertEquals(partitionKeyValue, tableEntity.getPartitionKey());
assertEquals(rowKeyValue, tableEntity.getRowKey());
});
} | final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); | void listEntitiesWithFilter() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
tableClient.listEntities(options, null, null).forEach(tableEntity -> {
assertEquals(partitionKeyValue, tableEntity.getPartitionKey());
assertEquals(rowKeyValue, tableEntity.getRowKey());
});
} | class TableClientTest extends TestBase {
    private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
    // True when the configured connection string targets a Cosmos DB Tables API
    // endpoint rather than Azure Table Storage (service behaviors differ slightly).
    private static final boolean IS_COSMOS_TEST = System.getenv("AZURE_TABLES_CONNECTION_STRING") != null
        && System.getenv("AZURE_TABLES_CONNECTION_STRING").contains("cosmos.azure.com");
    // Client under test; re-created (with a fresh table) before each test run.
    private TableClient tableClient;
    // Policy that records HTTP traffic when the interceptor runs in RECORD mode.
    private HttpPipelinePolicy recordPolicy;
    // HTTP client that replays captured sessions when running in PLAYBACK mode.
    private HttpClient playbackClient;
    // Builds a TableClientBuilder wired for the current test mode:
    //  - PLAYBACK: uses the interceptor's playback HTTP client (no network traffic).
    //  - RECORD:   uses the real default HTTP client plus the record policy to capture traffic.
    //  - LIVE:     uses the real default HTTP client with no recording.
    private TableClientBuilder getClientBuilder(String tableName, String connectionString) {
        final TableClientBuilder builder = new TableClientBuilder()
            .connectionString(connectionString)
            .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
            .tableName(tableName);
        if (interceptorManager.isPlaybackMode()) {
            playbackClient = interceptorManager.getPlaybackClient();
            builder.httpClient(playbackClient);
        } else {
            builder.httpClient(DEFAULT_HTTP_CLIENT);
            if (!interceptorManager.isLiveMode()) {
                recordPolicy = interceptorManager.getRecordPolicy();
                builder.addPolicy(recordPolicy);
            }
        }
        return builder;
    }
@Override
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildClient();
tableClient.createTable();
}
@Test
void createTable() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableClient tableClient2 = getClientBuilder(tableName2, connectionString).buildClient();
assertNotNull(tableClient2.createTable());
}
@Test
void createTableWithResponse() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableClient tableClient2 = getClientBuilder(tableName2, connectionString).buildClient();
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, tableClient2.createTableWithResponse(null, null).getStatusCode());
}
@Test
void createEntity() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
assertDoesNotThrow(() -> tableClient.createEntity(tableEntity));
}
@Test
void createEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, tableClient.createEntityWithResponse(entity, null, null).getStatusCode());
}
@Test
void createEntityWithAllSupportedDataTypes() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final boolean booleanValue = true;
final byte[] binaryValue = "Test value".getBytes();
final Date dateValue = new Date();
final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
final double doubleValue = 2.0d;
final UUID guidValue = UUID.randomUUID();
final int int32Value = 1337;
final long int64Value = 1337L;
final String stringValue = "This is table entity";
tableEntity.addProperty("BinaryTypeProperty", binaryValue);
tableEntity.addProperty("BooleanTypeProperty", booleanValue);
tableEntity.addProperty("DateTypeProperty", dateValue);
tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
tableEntity.addProperty("DoubleTypeProperty", doubleValue);
tableEntity.addProperty("GuidTypeProperty", guidValue);
tableEntity.addProperty("Int32TypeProperty", int32Value);
tableEntity.addProperty("Int64TypeProperty", int64Value);
tableEntity.addProperty("StringTypeProperty", stringValue);
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
final TableEntity entity = response.getValue();
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
assertTrue(properties.get("Int64TypeProperty") instanceof Long);
assertTrue(properties.get("StringTypeProperty") instanceof String);
}
/*@Test
void createEntitySubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
}*/
@Test
void deleteTable() {
assertDoesNotThrow(() -> tableClient.deleteTable());
}
@Test
void deleteNonExistingTable() {
tableClient.deleteTable();
assertDoesNotThrow(() -> tableClient.deleteTable());
}
@Test
void deleteTableWithResponse() {
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
@Test
void deleteNonExistingTableWithResponse() {
final int expectedStatusCode = 404;
tableClient.deleteTableWithResponse(null, null);
assertEquals(expectedStatusCode, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
@Test
void deleteEntity() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableClient.createEntity(tableEntity);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKeyValue, rowKeyValue));
}
@Test
void deleteNonExistingEntity() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKeyValue, rowKeyValue));
}
@Test
void deleteEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
assertEquals(expectedStatusCode,
tableClient.deleteEntityWithResponse(createdEntity, false, null, null).getStatusCode());
}
@Test
void deleteNonExistingEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 404;
assertEquals(expectedStatusCode,
tableClient.deleteEntityWithResponse(entity, false, null, null).getStatusCode());
}
@Test
void deleteEntityWithResponseMatchETag() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
assertEquals(expectedStatusCode,
tableClient.deleteEntityWithResponse(createdEntity, true, null, null).getStatusCode());
}
@Test
void getEntityWithResponse() {
getEntityWithResponseImpl(this.tableClient, this.testResourceNamer);
}
static void getEntityWithResponseImpl(TableClient tableClient, TestResourceNamer testResourceNamer) {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
}
@Test
void getEntityWithResponseWithSelect() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.addProperty("Test", "Value");
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
List<String> propertyList = new ArrayList<>();
propertyList.add("Test");
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList, null, null);
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertNull(entity.getPartitionKey());
assertNull(entity.getRowKey());
assertNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertEquals(entity.getProperties().get("Test"), "Value");
}
/*@Test
void getEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class, null, null);
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
}*/
@Test
void updateEntityWithResponseReplace() {
updateEntityWithResponse(TableEntityUpdateMode.REPLACE);
}
@Test
void updateEntityWithResponseMerge() {
updateEntityWithResponse(TableEntityUpdateMode.MERGE);
}
/**
* In the case of {@link TableEntityUpdateMode
* In the case of {@link TableEntityUpdateMode
*/
void updateEntityWithResponse(TableEntityUpdateMode mode) {
final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
final int expectedStatusCode = 204;
final String oldPropertyKey = "propertyA";
final String newPropertyKey = "propertyB";
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty(oldPropertyKey, "valueA");
tableClient.createEntity(tableEntity);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
createdEntity.getProperties().remove(oldPropertyKey);
createdEntity.addProperty(newPropertyKey, "valueB");
assertEquals(expectedStatusCode,
tableClient.updateEntityWithResponse(createdEntity, mode, true, null, null).getStatusCode());
TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey(newPropertyKey));
assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
}
/*@Test
void updateEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity);
tableEntity.setSubclassProperty("UpdatedValue");
assertEquals(expectedStatusCode,
tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true, null, null)
.getStatusCode()));
TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
}*/
@Test
void listEntities() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities().iterableByPage().iterator();
assertTrue(iterator.hasNext());
List<TableEntity> retrievedEntities = iterator.next().getValue();
assertEquals(2, retrievedEntities.size());
}
@Test
@Test
void listEntitiesWithSelect() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty("propertyC", "valueC")
.addProperty("propertyD", "valueD");
List<String> propertyList = new ArrayList<>();
propertyList.add("propertyC");
ListEntitiesOptions options = new ListEntitiesOptions()
.setSelect(propertyList);
tableClient.createEntity(entity);
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
TableEntity retrievedEntity = iterator.next().getValue().get(0);
assertNull(retrievedEntity.getPartitionKey());
assertNull(retrievedEntity.getRowKey());
assertEquals("valueC", retrievedEntity.getProperties().get("propertyC"));
assertNull(retrievedEntity.getProperties().get("propertyD"));
}
@Test
void listEntitiesWithTop() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertEquals(2, iterator.next().getValue().size());
}
/*@Test
void listEntitiesSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(SampleEntity.class).iterableByPage().iterator();
assertTrue(iterator.hasNext());
List<TableEntity> retrievedEntities = iterator.next().getValue();
TableEntity retrievedEntity = retrievedEntities.get(0);
TableEntity retrievedEntity2 = retrievedEntities.get(1);
assertEquals(partitionKeyValue, retrievedEntity.getPartitionKey());
assertEquals(rowKeyValue, retrievedEntity.getRowKey());
assertEquals(partitionKeyValue, retrievedEntity2.getPartitionKey());
assertEquals(rowKeyValue2, retrievedEntity2.getRowKey());
}*/
@Test
void submitTransaction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
final Response<TableTransactionResult> result =
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
assertNotNull(result);
assertEquals(expectedBatchStatusCode, result.getStatusCode());
assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(0).getStatusCode());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(1).getStatusCode());
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
final TableEntity entity = response.getValue();
assertNotNull(entity);
assertEquals(partitionKeyValue, entity.getPartitionKey());
assertEquals(rowKeyValue, entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
}
// Exercises every supported transaction action type (CREATE, UPSERT_MERGE,
// UPSERT_REPLACE, UPDATE_MERGE, UPDATE_REPLACE, DELETE) in one batch against a
// single partition, then verifies the batch returns 202 and each sub-operation 204.
@Test
void submitTransactionAsyncAllActions() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValueCreate = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertInsert = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueDelete = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
// Pre-create the entities that the merge/replace/delete actions will operate on.
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete));
TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
toUpsertMerge.addProperty("Test", "MergedValue");
TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
toUpsertReplace.addProperty("Test", "ReplacedValue");
TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
toUpdateMerge.addProperty("Test", "MergedValue");
TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
toUpdateReplace.addProperty("Test", "MergedValue");
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValueCreate)));
// UPSERT against a non-existent row acts as an insert.
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValueDelete)));
final Response<TableTransactionResult> response =
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
assertNotNull(response);
assertEquals(expectedBatchStatusCode, response.getStatusCode());
TableTransactionResult result = response.getValue();
// One sub-response per action, each reporting 204 No Content.
assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
}
}
// A transaction containing a DELETE for an entity that was never created must fail
// as a whole, and the thrown exception should identify the offending action.
@Test
void submitTransactionAsyncWithFailingAction() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);

    final List<TableTransactionAction> actions = new ArrayList<>();
    actions.add(new TableTransactionAction(TableTransactionActionType.CREATE,
        new TableEntity(partitionKeyValue, rowKeyValue)));
    // This entity does not exist, so the DELETE action fails the batch.
    actions.add(new TableTransactionAction(TableTransactionActionType.DELETE,
        new TableEntity(partitionKeyValue, rowKeyValue2)));

    try {
        tableClient.submitTransactionWithResponse(actions, null, null);
    } catch (TableTransactionFailedException e) {
        final String message = e.getMessage();
        assertTrue(message.contains("An action within the operation failed"));
        assertTrue(message.contains("The failed operation was"));
        assertTrue(message.contains("DeleteEntity"));
        assertTrue(message.contains("partitionKey='" + partitionKeyValue));
        assertTrue(message.contains("rowKey='" + rowKeyValue2));
        return;
    }

    fail();
}
// Two CREATE actions for the same (partitionKey, rowKey) must fail. Storage reports
// this as a TableTransactionFailedException; Cosmos endpoints surface a raw
// TableServiceException with a 400 "InvalidDuplicateRow" error instead.
@Test
void submitTransactionAsyncWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
try {
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
} catch (TableTransactionFailedException e) {
// Storage path: the exception message pinpoints the failing CreateEntity action.
assertTrue(e.getMessage().contains("An action within the operation failed"));
assertTrue(e.getMessage().contains("The failed operation was"));
assertTrue(e.getMessage().contains("CreateEntity"));
assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue));
return;
} catch (TableServiceException e) {
// Cosmos path: service-level 400 with an InvalidDuplicateRow error code.
assertTrue(IS_COSMOS_TEST);
assertEquals(400, e.getResponse().getStatusCode());
assertTrue(e.getMessage().contains("InvalidDuplicateRow"));
return;
}
fail();
}
// A transaction spanning two partition keys is rejected. Cosmos reports the FIRST
// action as the failure while Storage reports the SECOND (foreign-partition) one;
// apart from which entity is named, the assertions are identical, so the common
// checks are deduplicated here instead of being repeated in both branches.
@Test
void submitTransactionAsyncWithDifferentPartitionKeys() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);

    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));

    try {
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    } catch (TableTransactionFailedException e) {
        // Which action is blamed differs by backend; everything else matches.
        final String expectedPartitionKey = IS_COSMOS_TEST ? partitionKeyValue : partitionKeyValue2;
        final String expectedRowKey = IS_COSMOS_TEST ? rowKeyValue : rowKeyValue2;

        assertTrue(e.getMessage().contains("An action within the operation failed"));
        assertTrue(e.getMessage().contains("The failed operation was"));
        assertTrue(e.getMessage().contains("CreateEntity"));
        assertTrue(e.getMessage().contains("partitionKey='" + expectedPartitionKey));
        assertTrue(e.getMessage().contains("rowKey='" + expectedRowKey));
        return;
    }

    fail();
}
// Generates a read-only, HTTPS-only SAS with just the required parameters and
// verifies the resulting query string starts with the expected key/value pairs.
@Test
public void generateSasTokenWithMinimumParameters() {
    final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasPermission permissions = TableSasPermission.parse("r");
    final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;

    final TableSasSignatureValues sasSignatureValues = new TableSasSignatureValues(expiryTime, permissions)
        .setProtocol(protocol)
        .setVersion(TableServiceVersion.V2019_02_02.getVersion());

    final String sas = tableClient.generateSas(sasSignatureValues);

    // Everything before the signature is deterministic and can be checked exactly.
    final String expectedPrefix = "sv=2019-02-02"
        + "&se=2021-12-12T00%3A00%3A00Z"
        + "&tn=" + tableClient.getTableName()
        + "&sp=r"
        + "&spr=https"
        + "&sig=";
    assertTrue(sas.startsWith(expectedPrefix));
}
// Generates a SAS with every optional parameter set (start time, IP range,
// partition/row key range, HTTPS+HTTP) and checks the full query-string prefix.
@Test
public void generateSasTokenWithAllParameters() {
    final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasPermission permissions = TableSasPermission.parse("raud");
    final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
    final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
    final String startPartitionKey = "startPartitionKey";
    final String startRowKey = "startRowKey";
    final String endPartitionKey = "endPartitionKey";
    final String endRowKey = "endRowKey";

    final TableSasSignatureValues sasSignatureValues = new TableSasSignatureValues(expiryTime, permissions)
        .setProtocol(protocol)
        .setVersion(TableServiceVersion.V2019_02_02.getVersion())
        .setStartTime(startTime)
        .setSasIpRange(ipRange)
        .setStartPartitionKey(startPartitionKey)
        .setStartRowKey(startRowKey)
        .setEndPartitionKey(endPartitionKey)
        .setEndRowKey(endRowKey);

    final String sas = tableClient.generateSas(sasSignatureValues);

    // Everything before the signature is deterministic and can be checked exactly.
    final String expectedPrefix = "sv=2019-02-02"
        + "&st=2015-01-01T00%3A00%3A00Z"
        + "&se=2021-12-12T00%3A00%3A00Z"
        + "&tn=" + tableClient.getTableName()
        + "&sp=raud"
        + "&spk=startPartitionKey"
        + "&srk=startRowKey"
        + "&epk=endPartitionKey"
        + "&erk=endRowKey"
        + "&sip=a-b"
        + "&spr=https%2Chttp"
        + "&sig=";
    assertTrue(sas.startsWith(expectedPrefix));
}
// End-to-end check: a SAS generated by the shared-key client can authenticate a new
// client, which is then used to successfully create an entity. Skipped on Cosmos.
@Test
public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
// "a" = add permission only; just enough for the createEntity call below.
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
// Wire up playback/record policies to match the test's recording mode.
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableClient newTableClient = tableClientBuilder.buildClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, newTableClient.createEntityWithResponse(entity, null, null).getStatusCode());
}
// Sets a single signed identifier (access policy) on the table, then reads it back
// and verifies every field round-trips. Not supported on Cosmos endpoints.
@Test
public void setAndListAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id = "testPolicy";
TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode,
tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier), null, null)
.getStatusCode());
// Read the policy back and confirm all fields round-tripped unchanged.
TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
assertEquals(id, signedIdentifier.getId());
}
// Sets two signed identifiers sharing one access policy, then reads them back and
// checks ids, ordering, and policy fields. Not supported on Cosmos endpoints.
@Test
public void setAndListMultipleAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id1 = "testPolicy1";
String id2 = "testPolicy2";
List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode,
tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null).getStatusCode());
// Both identifiers should come back, in insertion order, with the shared policy intact.
TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
assertEquals(2, tableAccessPolicies.getIdentifiers().size());
assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
}
}
}
class TableClientTest extends TestBase {
// Shared HTTP client used for live (non-playback) test runs.
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
// True when the configured connection string targets a Cosmos DB Tables endpoint;
// several behaviors (access policies, duplicate-row errors) differ from Storage.
private static final boolean IS_COSMOS_TEST = System.getenv("AZURE_TABLES_CONNECTION_STRING") != null
&& System.getenv("AZURE_TABLES_CONNECTION_STRING").contains("cosmos.azure.com");
// Client under test; a fresh table is created for it before each test.
private TableClient tableClient;
// Pipeline policy that records HTTP traffic when running in RECORD mode.
private HttpPipelinePolicy recordPolicy;
// HTTP client that replays recorded traffic when running in PLAYBACK mode.
private HttpClient playbackClient;
// Builds a TableClientBuilder for the given table, wiring in the playback client
// or the record policy depending on the interceptor manager's current test mode.
private TableClientBuilder getClientBuilder(String tableName, String connectionString) {
final TableClientBuilder builder = new TableClientBuilder()
.connectionString(connectionString)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
// PLAYBACK: serve responses from previously recorded sessions.
playbackClient = interceptorManager.getPlaybackClient();
builder.httpClient(playbackClient);
} else {
builder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
// RECORD: capture live traffic so it can be replayed later.
recordPolicy = interceptorManager.getRecordPolicy();
builder.addPolicy(recordPolicy);
}
}
return builder;
}
// Per-test setup: build a client against a randomly named table and create it,
// so every test starts from an existing, empty table.
@Override
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildClient();
tableClient.createTable();
}
// Creating a brand-new table should succeed and return a non-null result.
@Test
void createTable() {
    final String secondTableName = testResourceNamer.randomName("tableName", 20);
    final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    final TableClient secondClient = getClientBuilder(secondTableName, connectionString).buildClient();

    assertNotNull(secondClient.createTable());
}
// The create-with-response overload surfaces the service's 204 No Content status.
@Test
void createTableWithResponse() {
    final String secondTableName = testResourceNamer.randomName("tableName", 20);
    final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    final TableClient secondClient = getClientBuilder(secondTableName, connectionString).buildClient();

    assertEquals(204, secondClient.createTableWithResponse(null, null).getStatusCode());
}
// Inserting a brand-new entity must not throw.
@Test
void createEntity() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKey, rowKey);

    assertDoesNotThrow(() -> tableClient.createEntity(entity));
}
// A successful entity insert surfaces 204 No Content.
@Test
void createEntityWithResponse() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKey, rowKey);

    assertEquals(204, tableClient.createEntityWithResponse(entity, null, null).getStatusCode());
}
// Round-trips one property of every supported EDM data type and verifies the Java
// type each one deserializes to. Note: java.util.Date is written but comes back as
// OffsetDateTime, which is the SDK's canonical date-time type.
@Test
void createEntityWithAllSupportedDataTypes() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final boolean booleanValue = true;
final byte[] binaryValue = "Test value".getBytes();
final Date dateValue = new Date();
final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
final double doubleValue = 2.0d;
final UUID guidValue = UUID.randomUUID();
final int int32Value = 1337;
final long int64Value = 1337L;
final String stringValue = "This is table entity";
tableEntity.addProperty("BinaryTypeProperty", binaryValue);
tableEntity.addProperty("BooleanTypeProperty", booleanValue);
tableEntity.addProperty("DateTypeProperty", dateValue);
tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
tableEntity.addProperty("DoubleTypeProperty", doubleValue);
tableEntity.addProperty("GuidTypeProperty", guidValue);
tableEntity.addProperty("Int32TypeProperty", int32Value);
tableEntity.addProperty("Int64TypeProperty", int64Value);
tableEntity.addProperty("StringTypeProperty", stringValue);
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
final TableEntity entity = response.getValue();
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
// Date values are normalized to OffsetDateTime on the way back.
assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
assertTrue(properties.get("Int64TypeProperty") instanceof Long);
assertTrue(properties.get("StringTypeProperty") instanceof String);
}
/*@Test
void createEntitySubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
}*/
// Deleting the table created in beforeTest should succeed without throwing.
@Test
void deleteTable() {
    assertDoesNotThrow(tableClient::deleteTable);
}
// Table deletes are idempotent: deleting an already-deleted table must not throw.
@Test
void deleteNonExistingTable() {
    tableClient.deleteTable();

    assertDoesNotThrow(tableClient::deleteTable);
}
// Deleting an existing table surfaces 204 No Content.
@Test
void deleteTableWithResponse() {
    assertEquals(204, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
// Unlike deleteTable(), the response overload does not swallow the error: deleting
// a missing table surfaces the raw 404 status.
@Test
void deleteNonExistingTableWithResponse() {
    tableClient.deleteTableWithResponse(null, null);

    assertEquals(404, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
// Creates an entity, confirms it exists (with an eTag), then deletes it.
@Test
void deleteEntity() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);

    tableClient.createEntity(new TableEntity(partitionKey, rowKey));

    // Sanity-check the entity is really there before attempting the delete.
    final TableEntity createdEntity = tableClient.getEntity(partitionKey, rowKey);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");

    assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKey, rowKey));
}
// Entity deletes are idempotent: deleting a missing entity must not throw.
@Test
void deleteNonExistingEntity() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);

    assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKey, rowKey));
}
// Unconditional delete (ifUnchanged = false) of an existing entity returns 204.
@Test
void deleteEntityWithResponse() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);

    tableClient.createEntity(new TableEntity(partitionKey, rowKey));

    final TableEntity createdEntity = tableClient.getEntity(partitionKey, rowKey);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");

    assertEquals(204, tableClient.deleteEntityWithResponse(createdEntity, false, null, null).getStatusCode());
}
// Deleting a missing entity via the response overload surfaces the raw 404.
@Test
void deleteNonExistingEntityWithResponse() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKey, rowKey);

    assertEquals(404, tableClient.deleteEntityWithResponse(entity, false, null, null).getStatusCode());
}
// Conditional delete (ifUnchanged = true) succeeds when the entity's eTag still
// matches the service's copy.
@Test
void deleteEntityWithResponseMatchETag() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);

    tableClient.createEntity(new TableEntity(partitionKey, rowKey));

    final TableEntity createdEntity = tableClient.getEntity(partitionKey, rowKey);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");

    assertEquals(204, tableClient.deleteEntityWithResponse(createdEntity, true, null, null).getStatusCode());
}
// Delegates to the shared static implementation so the same assertions can be
// reused by other test classes with differently configured clients.
@Test
void getEntityWithResponse() {
getEntityWithResponseImpl(this.tableClient, this.testResourceNamer);
}
// Shared implementation: creates an entity, fetches it with the response overload,
// and verifies the 200 status plus all round-tripped and service-populated fields.
static void getEntityWithResponseImpl(TableClient tableClient, TestResourceNamer testResourceNamer) {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
// Timestamp and eTag are populated by the service, not by the caller.
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
}
// Fetching with a $select projection should return only the requested property;
// unselected system fields (partition key, row key, timestamp) come back null.
@Test
void getEntityWithResponseWithSelect() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.addProperty("Test", "Value");
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
List<String> propertyList = new ArrayList<>();
propertyList.add("Test");
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList, null, null);
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
// Only "Test" was selected, so the system properties are absent.
assertNull(entity.getPartitionKey());
assertNull(entity.getRowKey());
assertNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertEquals(entity.getProperties().get("Test"), "Value");
}
/*@Test
void getEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class, null, null);
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
}*/
// REPLACE mode: the update overwrites the entity, dropping properties not present
// in the update payload.
@Test
void updateEntityWithResponseReplace() {
updateEntityWithResponse(TableEntityUpdateMode.REPLACE);
}
// MERGE mode: the update is combined with the stored entity, preserving properties
// not present in the update payload.
@Test
void updateEntityWithResponseMerge() {
updateEntityWithResponse(TableEntityUpdateMode.MERGE);
}
/**
* In the case of {@link TableEntityUpdateMode#MERGE}, we expect both the old and the new property to exist.
* In the case of {@link TableEntityUpdateMode#REPLACE}, we expect only the new property to exist.
*/
// Shared helper for the REPLACE/MERGE tests: creates an entity with one property,
// updates it with a different property under the given mode, then checks which
// properties survive (MERGE keeps the old one, REPLACE drops it).
void updateEntityWithResponse(TableEntityUpdateMode mode) {
// Only MERGE preserves properties absent from the update payload.
final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
final int expectedStatusCode = 204;
final String oldPropertyKey = "propertyA";
final String newPropertyKey = "propertyB";
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty(oldPropertyKey, "valueA");
tableClient.createEntity(tableEntity);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
// Swap the old property for the new one in the update payload.
createdEntity.getProperties().remove(oldPropertyKey);
createdEntity.addProperty(newPropertyKey, "valueB");
assertEquals(expectedStatusCode,
tableClient.updateEntityWithResponse(createdEntity, mode, true, null, null).getStatusCode());
TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey(newPropertyKey));
assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
}
/*@Test
void updateEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity);
tableEntity.setSubclassProperty("UpdatedValue");
assertEquals(expectedStatusCode,
tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true, null, null)
.getStatusCode());
TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
}*/
// Creates two entities in one partition and verifies that listing the table
// returns both of them in the first page.
@Test
void listEntities() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities().iterableByPage().iterator();
assertTrue(iterator.hasNext());
List<TableEntity> retrievedEntities = iterator.next().getValue();
assertEquals(2, retrievedEntities.size());
}
// Listing with a $select projection should return only the requested property;
// the unselected system fields (partition key, row key) and the unselected custom
// property come back null.
// Fix: the method previously carried a duplicated @Test annotation, which does not
// compile because JUnit's @Test is not a repeatable annotation.
@Test
void listEntitiesWithSelect() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
        .addProperty("propertyC", "valueC")
        .addProperty("propertyD", "valueD");

    List<String> propertyList = new ArrayList<>();
    propertyList.add("propertyC");
    ListEntitiesOptions options = new ListEntitiesOptions()
        .setSelect(propertyList);

    tableClient.createEntity(entity);

    Iterator<PagedResponse<TableEntity>> iterator =
        tableClient.listEntities(options, null, null).iterableByPage().iterator();

    assertTrue(iterator.hasNext());

    TableEntity retrievedEntity = iterator.next().getValue().get(0);

    assertNull(retrievedEntity.getPartitionKey());
    assertNull(retrievedEntity.getRowKey());
    assertEquals("valueC", retrievedEntity.getProperties().get("propertyC"));
    assertNull(retrievedEntity.getProperties().get("propertyD"));
}
// Creates three entities but lists with top=2, verifying the page size honors
// the requested limit.
@Test
void listEntitiesWithTop() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
// Only two of the three entities should appear in the first page.
assertEquals(2, iterator.next().getValue().size());
}
/*@Test
void listEntitiesSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(SampleEntity.class).iterableByPage().iterator();
assertTrue(iterator.hasNext());
List<TableEntity> retrievedEntities = iterator.next().getValue();
TableEntity retrievedEntity = retrievedEntities.get(0);
TableEntity retrievedEntity2 = retrievedEntities.get(1);
assertEquals(partitionKeyValue, retrievedEntity.getPartitionKey());
assertEquals(rowKeyValue, retrievedEntity.getRowKey());
assertEquals(partitionKeyValue, retrievedEntity2.getPartitionKey());
assertEquals(rowKeyValue2, retrievedEntity2.getRowKey());
}*/
@Test
void submitTransaction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
final Response<TableTransactionResult> result =
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
assertNotNull(result);
assertEquals(expectedBatchStatusCode, result.getStatusCode());
assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(0).getStatusCode());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(1).getStatusCode());
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
final TableEntity entity = response.getValue();
assertNotNull(entity);
assertEquals(partitionKeyValue, entity.getPartitionKey());
assertEquals(rowKeyValue, entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
}
@Test
void submitTransactionAsyncAllActions() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValueCreate = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertInsert = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueDelete = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete));
TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
toUpsertMerge.addProperty("Test", "MergedValue");
TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
toUpsertReplace.addProperty("Test", "ReplacedValue");
TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
toUpdateMerge.addProperty("Test", "MergedValue");
TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
toUpdateReplace.addProperty("Test", "MergedValue");
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValueCreate)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValueDelete)));
final Response<TableTransactionResult> response =
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
assertNotNull(response);
assertEquals(expectedBatchStatusCode, response.getStatusCode());
TableTransactionResult result = response.getValue();
assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
}
}
@Test
void submitTransactionAsyncWithFailingAction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValue2)));
try {
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
} catch (TableTransactionFailedException e) {
assertTrue(e.getMessage().contains("An action within the operation failed"));
assertTrue(e.getMessage().contains("The failed operation was"));
assertTrue(e.getMessage().contains("DeleteEntity"));
assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue2));
return;
}
fail();
}
@Test
void submitTransactionAsyncWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
try {
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
} catch (TableTransactionFailedException e) {
assertTrue(e.getMessage().contains("An action within the operation failed"));
assertTrue(e.getMessage().contains("The failed operation was"));
assertTrue(e.getMessage().contains("CreateEntity"));
assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue));
return;
} catch (TableServiceException e) {
assertTrue(IS_COSMOS_TEST);
assertEquals(400, e.getResponse().getStatusCode());
assertTrue(e.getMessage().contains("InvalidDuplicateRow"));
return;
}
fail();
}
@Test
void submitTransactionAsyncWithDifferentPartitionKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
try {
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
} catch (TableTransactionFailedException e) {
if (IS_COSMOS_TEST) {
assertTrue(e.getMessage().contains("An action within the operation failed"));
assertTrue(e.getMessage().contains("The failed operation was"));
assertTrue(e.getMessage().contains("CreateEntity"));
assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue));
} else {
assertTrue(e.getMessage().contains("An action within the operation failed"));
assertTrue(e.getMessage().contains("The failed operation was"));
assertTrue(e.getMessage().contains("CreateEntity"));
assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue2));
assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue2));
}
return;
}
fail();
}
@Test
public void generateSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("r");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("raud");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final String startPartitionKey = "startPartitionKey";
final String startRowKey = "startRowKey";
final String endPartitionKey = "endPartitionKey";
final String endRowKey = "endRowKey";
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange)
.setStartPartitionKey(startPartitionKey)
.setStartRowKey(startRowKey)
.setEndPartitionKey(endPartitionKey)
.setEndRowKey(endRowKey);
final String sas = tableClient.generateSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=raud"
+ "&spk=startPartitionKey"
+ "&srk=startRowKey"
+ "&epk=endPartitionKey"
+ "&erk=endRowKey"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableClient newTableClient = tableClientBuilder.buildClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, newTableClient.createEntityWithResponse(entity, null, null).getStatusCode());
}
@Test
public void setAndListAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id = "testPolicy";
TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode,
tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier), null, null)
.getStatusCode());
TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
assertEquals(id, signedIdentifier.getId());
}
@Test
public void setAndListMultipleAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id1 = "testPolicy1";
String id2 = "testPolicy2";
List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode,
tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null).getStatusCode());
TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
assertEquals(2, tableAccessPolicies.getIdentifiers().size());
assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
}
}
} |
This is interesting, I thought `Mono.just()` inside a `flatMap` would require the outer publisher to be subscribed first for this `Mono.just()` to be executed. | public void testOnlyOneThreadRefreshesToken() throws Exception {
SimpleTokenCache cache = new SimpleTokenCache(() -> incrementalRemoteGetTokenAsync(new AtomicInteger(1)));
CountDownLatch latch = new CountDownLatch(1);
AtomicLong maxMillis = new AtomicLong(0);
Flux.range(1, 10).flatMap(i -> Mono.fromCallable(OffsetDateTime::now))
.parallel(10)
.runOn(Schedulers.parallel())
.flatMap(start -> cache.getToken()
.map(t -> Duration.between(start, OffsetDateTime.now()).toMillis())
.doOnNext(millis -> {
if (millis > maxMillis.get()) {
maxMillis.set(millis);
}
}))
.doOnComplete(latch::countDown)
.subscribe();
latch.await();
long maxMs = maxMillis.get();
Assertions.assertTrue(maxMs > 1000, () -> "maxMillis was less than 1000ms. Was " + maxMs + "ms.");
Assertions.assertTrue(maxMs < 2000, () -> "maxMillis was greater than 2000ms. Was " + maxMs + "ms.");
} | Flux.range(1, 10).flatMap(i -> Mono.fromCallable(OffsetDateTime::now)) | public void testOnlyOneThreadRefreshesToken() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return incrementalRemoteGetTokenAsync(new AtomicInteger(1));
});
CountDownLatch latch = new CountDownLatch(1);
Flux.range(1, 10).flatMap(ignored -> Mono.just(OffsetDateTime.now()))
.parallel(10)
.runOn(Schedulers.boundedElastic())
.flatMap(start -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
latch.await();
assertEquals(1, refreshes.get());
} | class TokenCacheTests {
private static final Random RANDOM = new Random();
@Test
@Test
public void testLongRunningWontOverflow() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return remoteGetTokenThatExpiresSoonAsync();
});
VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
CountDownLatch latch = new CountDownLatch(1);
Flux.interval(Duration.ofMillis(100), virtualTimeScheduler)
.take(100)
.flatMap(i -> Mono.just(OffsetDateTime.now())
.subscribeOn(Schedulers.parallel())
.flatMap(start -> cache.getToken()
.map(t -> Duration.between(start, OffsetDateTime.now()).toMillis())
.doOnNext(millis -> {
})))
.doOnComplete(latch::countDown)
.subscribe();
virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40));
latch.await();
Assertions.assertTrue(refreshes.get() <= 11);
}
private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() {
return Mono.delay(Duration.ofMillis(1000)).map(l -> new Token(Integer.toString(RANDOM.nextInt(100)), 0));
}
private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) {
return Mono.delay(Duration.ofSeconds(latency.getAndIncrement()))
.map(l -> new Token(Integer.toString(RANDOM.nextInt(100))));
}
private static class Token extends AccessToken {
Token(String token) {
this(token, 5000);
}
Token(String token, long validityInMillis) {
super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis)));
}
}
} | class TokenCacheTests {
private static final Random RANDOM = new Random();
@Test
@Test
public void testLongRunningWontOverflow() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return remoteGetTokenThatExpiresSoonAsync();
});
VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
CountDownLatch latch = new CountDownLatch(1);
Flux.interval(Duration.ofMillis(100), virtualTimeScheduler)
.take(100)
.flatMap(i -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40));
latch.await();
Assertions.assertTrue(refreshes.get() <= 11);
}
private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() {
return Mono.delay(Duration.ofMillis(1000)).map(l -> new Token(Integer.toString(RANDOM.nextInt(100)), 0));
}
private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) {
return Mono.delay(Duration.ofSeconds(latency.getAndIncrement()))
.map(l -> new Token(Integer.toString(RANDOM.nextInt(100))));
}
private static class Token extends AccessToken {
Token(String token) {
this(token, 5000);
}
Token(String token, long validityInMillis) {
super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis)));
}
}
} |
Looks like you're correct, this fix was then transitioning over to `parallel`. I'll update the description as it is misleading. | public void testOnlyOneThreadRefreshesToken() throws Exception {
SimpleTokenCache cache = new SimpleTokenCache(() -> incrementalRemoteGetTokenAsync(new AtomicInteger(1)));
CountDownLatch latch = new CountDownLatch(1);
AtomicLong maxMillis = new AtomicLong(0);
Flux.range(1, 10).flatMap(i -> Mono.fromCallable(OffsetDateTime::now))
.parallel(10)
.runOn(Schedulers.parallel())
.flatMap(start -> cache.getToken()
.map(t -> Duration.between(start, OffsetDateTime.now()).toMillis())
.doOnNext(millis -> {
if (millis > maxMillis.get()) {
maxMillis.set(millis);
}
}))
.doOnComplete(latch::countDown)
.subscribe();
latch.await();
long maxMs = maxMillis.get();
Assertions.assertTrue(maxMs > 1000, () -> "maxMillis was less than 1000ms. Was " + maxMs + "ms.");
Assertions.assertTrue(maxMs < 2000, () -> "maxMillis was greater than 2000ms. Was " + maxMs + "ms.");
} | Flux.range(1, 10).flatMap(i -> Mono.fromCallable(OffsetDateTime::now)) | public void testOnlyOneThreadRefreshesToken() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return incrementalRemoteGetTokenAsync(new AtomicInteger(1));
});
CountDownLatch latch = new CountDownLatch(1);
Flux.range(1, 10).flatMap(ignored -> Mono.just(OffsetDateTime.now()))
.parallel(10)
.runOn(Schedulers.boundedElastic())
.flatMap(start -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
latch.await();
assertEquals(1, refreshes.get());
} | class TokenCacheTests {
private static final Random RANDOM = new Random();
@Test
@Test
public void testLongRunningWontOverflow() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return remoteGetTokenThatExpiresSoonAsync();
});
VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
CountDownLatch latch = new CountDownLatch(1);
Flux.interval(Duration.ofMillis(100), virtualTimeScheduler)
.take(100)
.flatMap(i -> Mono.just(OffsetDateTime.now())
.subscribeOn(Schedulers.parallel())
.flatMap(start -> cache.getToken()
.map(t -> Duration.between(start, OffsetDateTime.now()).toMillis())
.doOnNext(millis -> {
})))
.doOnComplete(latch::countDown)
.subscribe();
virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40));
latch.await();
Assertions.assertTrue(refreshes.get() <= 11);
}
private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() {
return Mono.delay(Duration.ofMillis(1000)).map(l -> new Token(Integer.toString(RANDOM.nextInt(100)), 0));
}
private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) {
return Mono.delay(Duration.ofSeconds(latency.getAndIncrement()))
.map(l -> new Token(Integer.toString(RANDOM.nextInt(100))));
}
private static class Token extends AccessToken {
Token(String token) {
this(token, 5000);
}
Token(String token, long validityInMillis) {
super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis)));
}
}
} | class TokenCacheTests {
private static final Random RANDOM = new Random();
@Test
@Test
public void testLongRunningWontOverflow() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return remoteGetTokenThatExpiresSoonAsync();
});
VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
CountDownLatch latch = new CountDownLatch(1);
Flux.interval(Duration.ofMillis(100), virtualTimeScheduler)
.take(100)
.flatMap(i -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40));
latch.await();
Assertions.assertTrue(refreshes.get() <= 11);
}
private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() {
return Mono.delay(Duration.ofMillis(1000)).map(l -> new Token(Integer.toString(RANDOM.nextInt(100)), 0));
}
private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) {
return Mono.delay(Duration.ofSeconds(latency.getAndIncrement()))
.map(l -> new Token(Integer.toString(RANDOM.nextInt(100))));
}
private static class Token extends AccessToken {
Token(String token) {
this(token, 5000);
}
Token(String token, long validityInMillis) {
super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis)));
}
}
} |
👍 this is great! | public void testOnlyOneThreadRefreshesToken() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return incrementalRemoteGetTokenAsync(new AtomicInteger(1));
});
CountDownLatch latch = new CountDownLatch(1);
Flux.range(1, 10).flatMap(ignored -> Mono.just(OffsetDateTime.now()))
.parallel(10)
.runOn(Schedulers.boundedElastic())
.flatMap(start -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
latch.await();
assertEquals(1, refreshes.get());
} | assertEquals(1, refreshes.get()); | public void testOnlyOneThreadRefreshesToken() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return incrementalRemoteGetTokenAsync(new AtomicInteger(1));
});
CountDownLatch latch = new CountDownLatch(1);
Flux.range(1, 10).flatMap(ignored -> Mono.just(OffsetDateTime.now()))
.parallel(10)
.runOn(Schedulers.boundedElastic())
.flatMap(start -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
latch.await();
assertEquals(1, refreshes.get());
} | class TokenCacheTests {
private static final Random RANDOM = new Random();
@Test
@Test
public void testLongRunningWontOverflow() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return remoteGetTokenThatExpiresSoonAsync();
});
VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
CountDownLatch latch = new CountDownLatch(1);
Flux.interval(Duration.ofMillis(100), virtualTimeScheduler)
.take(100)
.flatMap(i -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40));
latch.await();
Assertions.assertTrue(refreshes.get() <= 11);
}
private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() {
return Mono.delay(Duration.ofMillis(1000)).map(l -> new Token(Integer.toString(RANDOM.nextInt(100)), 0));
}
private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) {
return Mono.delay(Duration.ofSeconds(latency.getAndIncrement()))
.map(l -> new Token(Integer.toString(RANDOM.nextInt(100))));
}
private static class Token extends AccessToken {
Token(String token) {
this(token, 5000);
}
Token(String token, long validityInMillis) {
super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis)));
}
}
} | class TokenCacheTests {
private static final Random RANDOM = new Random();
@Test
@Test
public void testLongRunningWontOverflow() throws Exception {
AtomicLong refreshes = new AtomicLong(0);
SimpleTokenCache cache = new SimpleTokenCache(() -> {
refreshes.incrementAndGet();
return remoteGetTokenThatExpiresSoonAsync();
});
VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
CountDownLatch latch = new CountDownLatch(1);
Flux.interval(Duration.ofMillis(100), virtualTimeScheduler)
.take(100)
.flatMap(i -> cache.getToken())
.doOnComplete(latch::countDown)
.subscribe();
virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40));
latch.await();
Assertions.assertTrue(refreshes.get() <= 11);
}
private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() {
return Mono.delay(Duration.ofMillis(1000)).map(l -> new Token(Integer.toString(RANDOM.nextInt(100)), 0));
}
private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) {
return Mono.delay(Duration.ofSeconds(latency.getAndIncrement()))
.map(l -> new Token(Integer.toString(RANDOM.nextInt(100))));
}
private static class Token extends AccessToken {
Token(String token) {
this(token, 5000);
}
Token(String token, long validityInMillis) {
super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis)));
}
}
} |
Let's add verbose level logging when a redirect is performed, this will help generate breadcrumbs when troubleshooting potential issues | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptRedirect(context, next, context.getHttpRequest(), 1);
} | } | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return attemptRedirect(context, next, context.getHttpRequest(), 1, new HashSet<>());
} | class RedirectPolicy implements HttpPipelinePolicy {
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int MAX_REDIRECT_ATTEMPTS = 10;
private final RedirectStrategy redirectStrategy;
private Set<String> attemptedRedirectUrls = new HashSet<>();
private String redirectedEndpointUrl;
/**
* Creates {@link RedirectPolicy} with default {@link MaxAttemptRedirectStrategy} as {@link RedirectStrategy} and
* use the provided {@code statusCode} to determine if this request should be redirected
* and MAX_REDIRECT_ATTEMPTS for the try count.
*/
public RedirectPolicy() {
this(new MaxAttemptRedirectStrategy(MAX_REDIRECT_ATTEMPTS));
}
/**
* Creates {@link RedirectPolicy} with default {@link MaxAttemptRedirectStrategy} as {@link RedirectStrategy} and
* use the provided {@code statusCode} to determine if this request should be redirected.
*
* @param redirectStrategy The {@link RedirectStrategy} used for redirection.
* @throws NullPointerException When {@code statusCode} is null.
*/
public RedirectPolicy(RedirectStrategy redirectStrategy) {
this.redirectStrategy = Objects.requireNonNull(redirectStrategy, "'redirectStrategy' cannot be null.");
}
@Override
/**
* Function to process through the HTTP Response received in the pipeline
* and redirect sending the request with new redirect url.
*/
private Mono<HttpResponse> attemptRedirect(final HttpPipelineCallContext context,
final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest,
final int redirectAttempt) {
context.setHttpRequest(originalHttpRequest.copy());
if (this.redirectedEndpointUrl != null) {
context.getHttpRequest().setUrl(this.redirectedEndpointUrl);
}
return next.clone().process()
.flatMap(httpResponse -> {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), redirectStrategy.getLocationHeader());
if (isValidRedirectStatusCode(httpResponse.getStatusCode()) &&
isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod()) &&
responseLocation != null &&
redirectStrategy.shouldAttemptRedirect(responseLocation, redirectAttempt,
redirectStrategy.getMaxAttempts(), attemptedRedirectUrls)) {
attemptedRedirectUrls.add(responseLocation);
this.redirectedEndpointUrl = responseLocation;
return attemptRedirect(context, next, originalHttpRequest, redirectAttempt + 1);
}
return Mono.just(httpResponse);
});
}
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
return redirectStrategy.getAllowedMethods().contains(httpMethod);
}
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE;
}
private static String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : headerValue;
}
} | class RedirectPolicy implements HttpPipelinePolicy {
private final RedirectStrategy redirectStrategy;
/**
* Creates {@link RedirectPolicy} with default {@link DefaultRedirectStrategy} as {@link RedirectStrategy} and
* uses the redirect status response code (301, 302, 307, 308) to determine if this request should be redirected.
*/
public RedirectPolicy() {
this(new DefaultRedirectStrategy());
}
/**
* Creates {@link RedirectPolicy} with the provided {@code redirectStrategy} as {@link RedirectStrategy}
* to determine if this request should be redirected.
*
* @param redirectStrategy The {@link RedirectStrategy} used for redirection.
* @throws NullPointerException When {@code redirectStrategy} is {@code null}.
*/
public RedirectPolicy(RedirectStrategy redirectStrategy) {
this.redirectStrategy = Objects.requireNonNull(redirectStrategy, "'redirectStrategy' cannot be null.");
}
@Override
/**
* Function to process through the HTTP Response received in the pipeline
* and redirect sending the request with new redirect url.
*/
private Mono<HttpResponse> attemptRedirect(final HttpPipelineCallContext context,
final HttpPipelineNextPolicy next,
final HttpRequest originalHttpRequest,
final int redirectAttempt,
Set<String> attemptedRedirectUrls) {
context.setHttpRequest(originalHttpRequest.copy());
return next.clone().process()
.flatMap(httpResponse -> {
if (redirectStrategy.shouldAttemptRedirect(context, httpResponse, redirectAttempt,
attemptedRedirectUrls)) {
HttpRequest redirectRequestCopy = redirectStrategy.createRedirectRequest(httpResponse);
return httpResponse.getBody()
.ignoreElements()
.then(attemptRedirect(context, next, redirectRequestCopy, redirectAttempt + 1, attemptedRedirectUrls));
} else {
return Mono.just(httpResponse);
}
});
}
} |
We can use DEFAULT_ALLOWED_METHODS instead. | public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, new HashMap<Integer, HttpMethod>() {
{
put(HttpMethod.GET.ordinal(), HttpMethod.GET);
put(HttpMethod.HEAD.ordinal(), HttpMethod.HEAD);
}
});
} | } | public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 10;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final HashMap<Integer, HttpMethod> DEFAULT_ALLOWED_METHODS = new HashMap<Integer, HttpMethod>() {
{
put(HttpMethod.GET.ordinal(), HttpMethod.GET);
put(HttpMethod.HEAD.ordinal(), HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Map<Integer, HttpMethod> redirectMethods;
private final Set<String> attemptedRedirectUrls = new HashSet<>();
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 10,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @throws NullPointerException if {@code locationHeader} is {@code null}.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws NullPointerException if {@code locationHeader} is {@code null}.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Map<Integer, HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
this.locationHeader = locationHeader == null ? DEFAULT_REDIRECT_LOCATION_HEADER_NAME : locationHeader;
this.redirectMethods = allowedMethods == null ? DEFAULT_ALLOWED_METHODS : allowedMethods;
}
@Override
public boolean shouldAttemptRedirect(HttpResponse httpResponse, int tryCount) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& !alreadyAttemptedRedirectUrl(redirectUrl)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirect(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (responseLocation != null) {
attemptedRedirectUrls.add(responseLocation);
return httpResponse.getRequest().setUrl(responseLocation);
} else {
return httpResponse.getRequest();
}
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Map<Integer, HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error(String.format("Request was redirected more than once to: %s", redirectUrl));
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error(String.format("Request has been redirected more than %d times.", getMaxAttempts()));
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().containsKey(httpMethod.ordinal())) {
return true;
} else {
logger.error(
String.format("Request was redirected from a non redirect-able method: %s", httpMethod));
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name.
*/
private static String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : headerValue;
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error("'locationHeader' provided as null will be defaulted to {}",
DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
}
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: {}", redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than {} times.", getMaxAttempts());
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error("Request was redirected from an invalid redirect allowed method: {}", httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: {}, Request redirect was terminated", headerName);
return null;
} else {
return headerValue;
}
}
} |
We should include [temporary redirect](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/307) too. | private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE;
} | || statusCode == PERMANENT_REDIRECT_STATUS_CODE; | private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 10;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final HashMap<Integer, HttpMethod> DEFAULT_ALLOWED_METHODS = new HashMap<Integer, HttpMethod>() {
{
put(HttpMethod.GET.ordinal(), HttpMethod.GET);
put(HttpMethod.HEAD.ordinal(), HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Map<Integer, HttpMethod> redirectMethods;
private final Set<String> attemptedRedirectUrls = new HashSet<>();
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 10,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @throws NullPointerException if {@code locationHeader} is {@code null}.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, new HashMap<Integer, HttpMethod>() {
{
put(HttpMethod.GET.ordinal(), HttpMethod.GET);
put(HttpMethod.HEAD.ordinal(), HttpMethod.HEAD);
}
});
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws NullPointerException if {@code locationHeader} is {@code null}.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Map<Integer, HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
this.locationHeader = locationHeader == null ? DEFAULT_REDIRECT_LOCATION_HEADER_NAME : locationHeader;
this.redirectMethods = allowedMethods == null ? DEFAULT_ALLOWED_METHODS : allowedMethods;
}
@Override
public boolean shouldAttemptRedirect(HttpResponse httpResponse, int tryCount) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& !alreadyAttemptedRedirectUrl(redirectUrl)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirect(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (responseLocation != null) {
attemptedRedirectUrls.add(responseLocation);
return httpResponse.getRequest().setUrl(responseLocation);
} else {
return httpResponse.getRequest();
}
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Map<Integer, HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error(String.format("Request was redirected more than once to: %s", redirectUrl));
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error(String.format("Request has been redirected more than %d times.", getMaxAttempts()));
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().containsKey(httpMethod.ordinal())) {
return true;
} else {
logger.error(
String.format("Request was redirected from a non redirect-able method: %s", httpMethod));
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method.
*/
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name.
*/
private static String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
return CoreUtils.isNullOrEmpty(headerValue) ? null : headerValue;
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error("'locationHeader' provided as null will be defaulted to {}",
DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
}
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: {}", redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than {} times.", getMaxAttempts());
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error("Request was redirected from an invalid redirect allowed method: {}", httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: {}, Request redirect was terminated", headerName);
return null;
} else {
return headerValue;
}
}
} |
We should use default values if `locationHeader` and `allowedMethods` are null or *empty* and document it. | public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
this.locationHeader = locationHeader == null ? DEFAULT_REDIRECT_LOCATION_HEADER_NAME : locationHeader;
this.redirectMethods = allowedMethods == null ? DEFAULT_REDIRECT_ALLOWED_METHODS : allowedMethods;
} | this.redirectMethods = allowedMethods == null ? DEFAULT_REDIRECT_ALLOWED_METHODS : allowedMethods; | public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error("'locationHeader' provided as null will be defaulted to {}",
DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error(String.format("Request was redirected more than once to: %s", redirectUrl));
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error(String.format("Request has been redirected more than %d times.", getMaxAttempts()));
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error(
String.format("Request was redirected from an invalid redirect allowed method: %s", httpMethod));
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
private String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error(String.format("Redirect url was null for header name: %s, Request redirect was terminated"
, headerName));
return null;
} else {
return headerValue;
}
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: {}", redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than {} times.", getMaxAttempts());
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error("Request was redirected from an invalid redirect allowed method: {}", httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: {}, Request redirect was terminated", headerName);
return null;
} else {
return headerValue;
}
}
} |
or -DAZURE_STORAGE_SAS_SERVICE_VERSION sys property. Please also mention that env variable and/or property must be set before or at JVM start. Otherwise result is not deterministic. | public static void main(String[] args) {
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
String endpoint = String.format(Locale.ROOT, "https:
BlobServiceVersion serviceVersion = BlobServiceVersion.V2019_02_02;
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.serviceVersion(serviceVersion)
.endpoint(endpoint).credential(credential).buildClient();
/*
If the workload includes generating new SAS tokens using an older format, the version of the sas generation can also
be configured. Set the environment variable AZURE_STORAGE_SAS_SERVICE_VERSION to the service version which
corresponds to the desired SAS format to configure this behavior.
*/
} | corresponds to the desired SAS format to configure this behavior. | public static void main(String[] args) {
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
String endpoint = String.format(Locale.ROOT, "https:
BlobServiceVersion serviceVersion = BlobServiceVersion.V2019_02_02;
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.serviceVersion(serviceVersion)
.endpoint(endpoint).credential(credential).buildClient();
/*
If the workload includes generating new SAS tokens using an older format, the version of the sas generation can
also be configured. Before starting the jvm, set the environment variable AZURE_STORAGE_SAS_SERVICE_VERSION or
the JVM system propert -DAZURE_STORAGE_SAS_SERVICE_VERSION to the service version which corresponds to the
desired SAS format to configure this behavior.
*/
} | class ServiceVersionExample {
} | class ServiceVersionExample {
} |
```suggestion the JVM system property AZURE_STORAGE_SAS_SERVICE_VERSION (e.g. -DAZURE_STORAGE_SAS_SERVICE_VERSION=2019-12-12) to the service version which corresponds to the ``` | public static void main(String[] args) {
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
String endpoint = String.format(Locale.ROOT, "https:
BlobServiceVersion serviceVersion = BlobServiceVersion.V2019_02_02;
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.serviceVersion(serviceVersion)
.endpoint(endpoint).credential(credential).buildClient();
/*
If the workload includes generating new SAS tokens using an older format, the version of the sas generation can
also be configured. Before starting the jvm, set the environment variable AZURE_STORAGE_SAS_SERVICE_VERSION or
the JVM system propert -DAZURE_STORAGE_SAS_SERVICE_VERSION to the service version which corresponds to the
desired SAS format to configure this behavior.
*/
} | the JVM system propert -DAZURE_STORAGE_SAS_SERVICE_VERSION to the service version which corresponds to the | public static void main(String[] args) {
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
String endpoint = String.format(Locale.ROOT, "https:
BlobServiceVersion serviceVersion = BlobServiceVersion.V2019_02_02;
BlobServiceClient storageClient = new BlobServiceClientBuilder()
.serviceVersion(serviceVersion)
.endpoint(endpoint).credential(credential).buildClient();
/*
If the workload includes generating new SAS tokens using an older format, the version of the sas generation can
also be configured. Before starting the jvm, set the environment variable AZURE_STORAGE_SAS_SERVICE_VERSION or
the JVM system propert -DAZURE_STORAGE_SAS_SERVICE_VERSION to the service version which corresponds to the
desired SAS format to configure this behavior.
*/
} | class ServiceVersionExample {
} | class ServiceVersionExample {
} |
This is even worse! :) Don't do string concat, just use the var-args method to pass in the arguments like elsewhere in this code. | public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error(
"'locationHeader' provided as null will be defaulted to " + DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to " + DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
} | "'locationHeader' provided as null will be defaulted to " + DEFAULT_REDIRECT_LOCATION_HEADER_NAME); | public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error("'locationHeader' provided as null will be defaulted to {}",
DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: " + redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than " + getMaxAttempts() + " times.");
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error(
"Request was redirected from an invalid redirect allowed method: " + httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
private String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: " + headerName + ", Request redirect was terminated");
return null;
} else {
return headerValue;
}
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: {}", redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than {} times.", getMaxAttempts());
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error("Request was redirected from an invalid redirect allowed method: {}", httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: {}, Request redirect was terminated", headerName);
return null;
} else {
return headerValue;
}
}
} |
updated! | public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error(
"'locationHeader' provided as null will be defaulted to " + DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to " + DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
} | "'locationHeader' provided as null will be defaulted to " + DEFAULT_REDIRECT_LOCATION_HEADER_NAME); | public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw logger.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
if (CoreUtils.isNullOrEmpty(locationHeader)) {
logger.error("'locationHeader' provided as null will be defaulted to {}",
DEFAULT_REDIRECT_LOCATION_HEADER_NAME);
this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME;
} else {
this.locationHeader = locationHeader;
}
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
logger.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS);
this.redirectMethods = DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
this.redirectMethods = allowedMethods;
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: " + redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than " + getMaxAttempts() + " times.");
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error(
"Request was redirected from an invalid redirect allowed method: " + httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
private String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: " + headerName + ", Request redirect was terminated");
return null;
} else {
return headerValue;
}
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private final ClientLogger logger = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location";
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<HttpMethod>() {
{
add(HttpMethod.GET);
add(HttpMethod.HEAD);
}
};
private final int maxAttempts;
private final String locationHeader;
private final Set<HttpMethod> redirectMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3,
* header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*/
public DefaultRedirectStrategy() {
this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and
* default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod
* and {@link HttpMethod
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts) {
this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS);
}
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
@Override
public boolean shouldAttemptRedirect(HttpPipelineCallContext context,
HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
String redirectUrl =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
if (isValidRedirectCount(tryCount)
&& redirectUrl != null
&& !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)
&& isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
logger.verbose("[Redirecting] Try count: {}, Attempted Redirect URLs: {}", tryCount,
attemptedRedirectUrls.toString());
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
String responseLocation =
tryGetRedirectHeader(httpResponse.getHeaders(), this.getLocationHeader());
return httpResponse.getRequest().setUrl(responseLocation);
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public String getLocationHeader() {
return locationHeader;
}
@Override
public Set<HttpMethod> getAllowedMethods() {
return redirectMethods;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect
* , {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
logger.error("Request was redirected more than once to: {}", redirectUrl);
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
logger.error("Request has been redirected more than {} times.", getMaxAttempts());
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (getAllowedMethods().contains(httpMethod)) {
return true;
} else {
logger.error("Request was redirected from an invalid redirect allowed method: {}", httpMethod);
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
/**
* Gets the redirect url from the response headers.
*
* @param headers the http response headers.
* @param headerName the header name to look up value for.
* @return the header value for the provided header name, {@code null} otherwise.
*/
String tryGetRedirectHeader(HttpHeaders headers, String headerName) {
String headerValue = headers.getValue(headerName);
if (CoreUtils.isNullOrEmpty(headerValue)) {
logger.error("Redirect url was null for header name: {}, Request redirect was terminated", headerName);
return null;
} else {
return headerValue;
}
}
} |
I'm guessing this is to make these overridable and have "spy" in tests. Should we take KeyGenerator and Cipher references in ctor instead? | Cipher generateCipher(SecretKey aesKey) throws GeneralSecurityException {
Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING);
cipher.init(Cipher.ENCRYPT_MODE, aesKey);
return cipher;
} | } | Cipher generateCipher(SecretKey aesKey) throws GeneralSecurityException {
Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING);
cipher.init(Cipher.ENCRYPT_MODE, aesKey);
return cipher;
} | class EncryptedBlobAsyncClient extends BlobAsyncClient {
static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB;
private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class);
/**
* An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption.
*/
private final AsyncKeyEncryptionKey keyWrapper;
/**
* A {@link String} that is used to wrap/unwrap the content key during encryption.
*/
private final String keyWrapAlgorithm;
/**
* Package-private constructor for use by {@link EncryptedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param key The key used to encrypt and decrypt data.
* @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption.
* @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob
* version.
*/
EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
EncryptionScope encryptionScope, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) {
super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
encryptionScope, versionId);
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
}
/**
* Creates a new {@link EncryptedBlobAsyncClient} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link EncryptedBlobAsyncClient} with the specified {@code encryptionScope}.
*/
@Override
public EncryptedBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) {
EncryptionScope finalEncryptionScope = null;
if (encryptionScope != null) {
finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
}
return new EncryptedBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope,
keyWrapper, keyWrapAlgorithm, getVersionId());
}
/**
* Creates a new {@link EncryptedBlobAsyncClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, pass {@code null} to use no customer
* provided key.
* @return a {@link EncryptedBlobAsyncClient} with the specified {@code customerProvidedKey}.
*/
@Override
public EncryptedBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) {
CpkInfo finalCustomerProvidedKey = null;
if (customerProvidedKey != null) {
finalCustomerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return new EncryptedBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, keyWrapper,
keyWrapAlgorithm, getVersionId());
}
/**
* Creates a new block blob. By default, this method will not overwrite an existing blob.
* <p>
* Updating an existing block blob overwrites any existing blob metadata. Partial updates are not supported with
* this method; the content of the existing blob is overwritten with the new content. To perform a partial update of
* block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) {
try {
return this.upload(data, parallelTransferOptions, false);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new block blob, or updates the content of an existing block blob.
* <p>
* Updating an existing block blob overwrites any existing blob metadata. Partial updates are not supported with
* this method; the content of the existing blob is overwritten with the new content. To perform a partial update of
* block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param overwrite Whether to overwrite if the blob exists.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions,
boolean overwrite) {
try {
Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null,
null).flatMap(FluxUtil::toMono);
if (overwrite) {
return uploadTask;
} else {
return exists().flatMap(exists -> exists
? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
: uploadTask);
}
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing blob metadata. Partial updates are not supported with this method; the content of the
* existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* which this method uses internally. For more information, see the <a href="https:
* Docs for Put Block</a> and the <a href="https:
* Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any metadata
* key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data,
ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata,
AccessTier tier, BlobRequestConditions requestConditions) {
return this.uploadWithResponse(new BlobParallelUploadOptions(data)
.setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
.setTier(tier).setRequestConditions(requestConditions));
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing blob metadata. Partial updates are not supported with this method; the content of the
* existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* which this method uses internally. For more information, see the <a href="https:
* Docs for Put Block</a> and the <a href="https:
* Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
*
* @param options {@link BlobParallelUploadOptions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
try {
StorageImplUtils.assertNotNull("options", options);
final Map<String, String> metadataFinal = options.getMetadata() == null
? new HashMap<>() : options.getMetadata();
Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer(
options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, false)
: options.getDataFlux();
Flux<ByteBuffer> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
return super.uploadWithResponse(new BlobParallelUploadOptions(dataFinal)
.setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
.setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier())
.setRequestConditions(options.getRequestConditions())
.setComputeMd5(options.isComputeMd5()));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new block blob with the content of the specified file. By default, this method will not overwrite
* existing data
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @return An empty response
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath) {
try {
return uploadFromFile(filePath, false);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param overwrite Whether to overwrite should the blob exist.
* @return An empty response
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
try {
Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null);
if (overwrite) {
return uploadTask;
} else {
return exists().flatMap(exists -> exists
? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
: uploadTask);
}
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any metadata
* key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return An empty response
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000 MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier,
BlobRequestConditions requestConditions) {
return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
.setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
.setTier(tier).setRequestConditions(requestConditions))
.then();
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse
*
* @param options {@link BlobUploadFromFileOptions}
* @return A reactive response containing the information of the uploaded block blob.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000 MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
try {
StorageImplUtils.assertNotNull("options", options);
return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger),
channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel))
.setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
.setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
.setRequestConditions(options.getRequestConditions()))
.doOnTerminate(() -> {
try {
channel.close();
} catch (IOException e) {
throw logger.logExceptionAsError(new UncheckedIOException(e));
}
}), channel -> UploadUtils.uploadFileCleanup(channel, logger));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Encrypts the given Flux ByteBuffer.
*
* @param plainTextFlux The Flux ByteBuffer to be encrypted.
* @return A {@link EncryptedBlob}
*/
Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) {
Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null");
try {
SecretKey aesKey = generateSecretKey();
Cipher cipher = generateCipher(aesKey);
Map<String, String> keyWrappingMetadata = new HashMap<>();
keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY,
CryptographyConstants.AGENT_METADATA_VALUE);
return keyWrapper.getKeyId().flatMap(keyId -> keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded())
.map(encryptedKey -> {
WrappedKey wrappedKey = new WrappedKey(keyId, encryptedKey, keyWrapAlgorithm);
EncryptionData encryptionData = new EncryptionData()
.setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE)
.setEncryptionAgent(new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1,
EncryptionAlgorithm.AES_CBC_256))
.setKeyWrappingMetadata(keyWrappingMetadata)
.setContentEncryptionIV(cipher.getIV())
.setWrappedContentKey(wrappedKey);
Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> {
int outputSize = cipher.getOutputSize(plainTextBuffer.remaining());
/*
* This should be the only place we allocate memory in encryptBlob(). Although there is an
* overload that can encrypt in place that would save allocations, we do not want to overwrite
* customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable,
* we should implement pooling.
*/
ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize);
int encryptedBytes;
try {
encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer);
} catch (ShortBufferException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
encryptedTextBuffer.position(0);
encryptedTextBuffer.limit(encryptedBytes);
return encryptedTextBuffer;
});
/*
* Defer() ensures the contained code is not executed until the Flux is subscribed to, in
* other words, cipher.doFinal() will not be called until the plainTextFlux has completed
* and therefore all other data has been encrypted.
*/
encryptedTextFlux = Flux.concat(encryptedTextFlux,
Mono.fromCallable(() -> ByteBuffer.wrap(cipher.doFinal())));
return new EncryptedBlob(encryptionData, encryptedTextFlux);
}));
} catch (GeneralSecurityException e) {
throw logger.logExceptionAsError(new RuntimeException(e));
}
}
SecretKey generateSecretKey() throws NoSuchAlgorithmException {
KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES);
keyGen.init(256);
return keyGen.generateKey();
}
/**
* Encrypt the blob and add the encryption metadata to the customer's metadata.
*
* @param plainText The data to encrypt
* @param metadata The customer's metadata to be updated.
* @return A Mono containing the cipher text
*/
private Flux<ByteBuffer> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) {
return this.encryptBlob(plainText).flatMapMany(encryptedBlob -> {
try {
metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY,
encryptedBlob.getEncryptionData().toJsonString());
return encryptedBlob.getCiphertextFlux();
} catch (JsonProcessingException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
});
}
/**
* Unsupported. Cannot query data encrypted on client side.
*/
@Override
public Flux<ByteBuffer> query(String expression) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Cannot query data encrypted on client side"));
}
/**
* Unsupported. Cannot query data encrypted on client side.
*/
@Override
public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Cannot query data encrypted on client side"));
}
} | class EncryptedBlobAsyncClient extends BlobAsyncClient {
static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB;
private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class);
/**
* An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption.
*/
private final AsyncKeyEncryptionKey keyWrapper;
/**
* A {@link String} that is used to wrap/unwrap the content key during encryption.
*/
private final String keyWrapAlgorithm;
/**
* Package-private constructor for use by {@link EncryptedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param key The key used to encrypt and decrypt data.
* @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption.
* @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob
* version.
*/
EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
EncryptionScope encryptionScope, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) {
// Delegate all non-encryption state (pipeline, addressing, CPK, scope, version) to the base client.
super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
encryptionScope, versionId);
// Key and algorithm used to wrap/unwrap the per-blob content-encryption key during uploads.
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
}
/**
* Creates a new {@link EncryptedBlobAsyncClient} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link EncryptedBlobAsyncClient} with the specified {@code encryptionScope}.
*/
@Override
public EncryptedBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) {
    // Translate the raw scope name into the service model object; null means "no scope".
    EncryptionScope scopeModel = (encryptionScope == null)
        ? null
        : new EncryptionScope().setEncryptionScope(encryptionScope);
    // Clone this client wholesale, swapping in only the new encryption scope.
    return new EncryptedBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(),
        getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(),
        scopeModel, keyWrapper, keyWrapAlgorithm, getVersionId());
}
/**
* Creates a new {@link EncryptedBlobAsyncClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, pass {@code null} to use no customer
* provided key.
* @return a {@link EncryptedBlobAsyncClient} with the specified {@code customerProvidedKey}.
*/
@Override
public EncryptedBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) {
    // Map the public CustomerProvidedKey type onto the wire-level CpkInfo model; null disables CPK.
    CpkInfo cpk = null;
    if (customerProvidedKey != null) {
        cpk = new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    }
    // Clone this client wholesale, swapping in only the new customer-provided key.
    return new EncryptedBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(),
        getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), cpk, encryptionScope,
        keyWrapper, keyWrapAlgorithm, getVersionId());
}
/**
* Creates a new block blob. By default, this method will not overwrite an existing blob.
* <p>
* Updating an existing block blob overwrites any existing blob metadata. Partial updates are not supported with
* this method; the content of the existing blob is overwritten with the new content. To perform a partial update of
* block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) {
    // Convenience overload: delegate with overwrite disabled (the documented default).
    try {
        return upload(data, parallelTransferOptions, false);
    } catch (RuntimeException ex) {
        // Route synchronous construction failures into the reactive error channel.
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob.
* <p>
* Updating an existing block blob overwrites any existing blob metadata. Partial updates are not supported with
* this method; the content of the existing blob is overwritten with the new content. To perform a partial update of
* block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param overwrite Whether to overwrite if the blob exists.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions,
boolean overwrite) {
try {
// Cold Mono: nothing is uploaded until it is subscribed to, so building it eagerly is side-effect free.
Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null,
null).flatMap(FluxUtil::toMono);
if (overwrite) {
return uploadTask;
} else {
// Subscribe to the upload only after confirming the blob is absent.
// NOTE(review): exists() followed by upload is not atomic; a concurrent writer could race in between.
return exists().flatMap(exists -> exists
? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
: uploadTask);
}
} catch (RuntimeException ex) {
// Route synchronous failures into the reactive error channel instead of throwing to the caller.
return monoError(logger, ex);
}
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing blob metadata. Partial updates are not supported with this method; the content of the
* existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* which this method uses internally. For more information, see the <a href="https:
* Docs for Put Block</a> and the <a href="https:
* Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any metadata
* key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data,
    ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata,
    AccessTier tier, BlobRequestConditions requestConditions) {
    // Bundle the individual arguments into the options-bag overload, which holds the real logic.
    BlobParallelUploadOptions uploadOptions = new BlobParallelUploadOptions(data)
        .setParallelTransferOptions(parallelTransferOptions)
        .setHeaders(headers)
        .setMetadata(metadata)
        .setTier(tier)
        .setRequestConditions(requestConditions);
    return this.uploadWithResponse(uploadOptions);
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing blob metadata. Partial updates are not supported with this method; the content of the
* existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* which this method uses internally. For more information, see the <a href="https:
* Docs for Put Block</a> and the <a href="https:
* Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
*
* @param options {@link BlobParallelUploadOptions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
try {
StorageImplUtils.assertNotNull("options", options);
// Reuse the caller's metadata map when present so the encryption metadata can be attached to it.
// NOTE(review): when options.getMetadata() != null this mutates the caller-supplied map
// (ENCRYPTION_DATA_KEY is added during subscription) — confirm callers tolerate that.
final Map<String, String> metadataFinal = options.getMetadata() == null
? new HashMap<>() : options.getMetadata();
// Normalize the input: an InputStream source is adapted into a chunked Flux<ByteBuffer>.
Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer(
options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, false)
: options.getDataFlux();
// Wrap the plaintext stream so ciphertext is produced and the encryption metadata recorded.
Flux<ByteBuffer> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
// Delegate the actual transfer to the base (unencrypted) client with the ciphertext stream.
return super.uploadWithResponse(new BlobParallelUploadOptions(dataFinal)
.setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
.setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier())
.setRequestConditions(options.getRequestConditions())
.setComputeMd5(options.isComputeMd5()));
} catch (RuntimeException ex) {
// Route synchronous failures into the reactive error channel.
return monoError(logger, ex);
}
}
/**
* Creates a new block blob with the content of the specified file. By default, this method will not overwrite
* existing data
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @return An empty response
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath) {
    // Convenience overload: by default an existing blob is never overwritten.
    final boolean overwrite = false;
    try {
        return uploadFromFile(filePath, overwrite);
    } catch (RuntimeException ex) {
        // Route synchronous construction failures into the reactive error channel.
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param overwrite Whether to overwrite should the blob exist.
* @return An empty response
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
try {
// Cold Mono: the file is not opened or transferred until subscription.
Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null);
if (overwrite) {
return uploadTask;
} else {
// Subscribe to the upload only after confirming the blob is absent.
// NOTE(review): exists() followed by upload is not atomic; a concurrent writer could race in between.
return exists().flatMap(exists -> exists
? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
: uploadTask);
}
} catch (RuntimeException ex) {
// Route synchronous failures into the reactive error channel.
return monoError(logger, ex);
}
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any metadata
* key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return An empty response
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000 MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier,
    BlobRequestConditions requestConditions) {
    // Bundle the individual arguments into the options-bag overload and discard the response envelope.
    BlobUploadFromFileOptions fileOptions = new BlobUploadFromFileOptions(filePath)
        .setParallelTransferOptions(parallelTransferOptions)
        .setHeaders(headers)
        .setMetadata(metadata)
        .setTier(tier)
        .setRequestConditions(requestConditions);
    return this.uploadFromFileWithResponse(fileOptions).then();
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse
*
* @param options {@link BlobUploadFromFileOptions}
* @return A reactive response containing the information of the uploaded block blob.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000 MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
try {
StorageImplUtils.assertNotNull("options", options);
// Mono.using ties the file channel's lifetime to the subscription: the supplier opens it on
// subscribe, and the third argument cleans it up if the supplier or the pipeline fails.
return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger),
channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel))
.setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
.setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
.setRequestConditions(options.getRequestConditions()))
.doOnTerminate(() -> {
// Close the channel when the upload completes or errors.
try {
channel.close();
} catch (IOException e) {
throw logger.logExceptionAsError(new UncheckedIOException(e));
}
}), channel -> UploadUtils.uploadFileCleanup(channel, logger));
} catch (RuntimeException ex) {
// Route synchronous failures into the reactive error channel.
return monoError(logger, ex);
}
}
/**
* Encrypts the given Flux ByteBuffer.
*
* @param plainTextFlux The Flux ByteBuffer to be encrypted.
* @return A {@link EncryptedBlob}
*/
Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) {
Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null");
try {
// Fresh content-encryption key and cipher per upload; the CEK never leaves this method unwrapped.
SecretKey aesKey = generateSecretKey();
Cipher cipher = generateCipher(aesKey);
Map<String, String> keyWrappingMetadata = new HashMap<>();
keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY,
CryptographyConstants.AGENT_METADATA_VALUE);
// Wrap the CEK with the customer's key-encryption key, then assemble the metadata envelope.
return keyWrapper.getKeyId().flatMap(keyId -> keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded())
.map(encryptedKey -> {
WrappedKey wrappedKey = new WrappedKey(keyId, encryptedKey, keyWrapAlgorithm);
// EncryptionData is serialized into blob metadata so a reader can unwrap the key and decrypt.
EncryptionData encryptionData = new EncryptionData()
.setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE)
.setEncryptionAgent(new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1,
EncryptionAlgorithm.AES_CBC_256))
.setKeyWrappingMetadata(keyWrappingMetadata)
.setContentEncryptionIV(cipher.getIV())
.setWrappedContentKey(wrappedKey);
// Stateful streaming encryption: cipher.update is applied buffer-by-buffer, in order.
Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> {
int outputSize = cipher.getOutputSize(plainTextBuffer.remaining());
/*
* This should be the only place we allocate memory in encryptBlob(). Although there is an
* overload that can encrypt in place that would save allocations, we do not want to overwrite
* customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable,
* we should implement pooling.
*/
ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize);
int encryptedBytes;
try {
encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer);
} catch (ShortBufferException e) {
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
// Flip the buffer to expose only the bytes the cipher actually produced.
encryptedTextBuffer.position(0);
encryptedTextBuffer.limit(encryptedBytes);
return encryptedTextBuffer;
});
/*
* Defer() ensures the contained code is not executed until the Flux is subscribed to, in
* other words, cipher.doFinal() will not be called until the plainTextFlux has completed
* and therefore all other data has been encrypted.
*/
encryptedTextFlux = Flux.concat(encryptedTextFlux,
Mono.fromCallable(() -> ByteBuffer.wrap(cipher.doFinal())));
return new EncryptedBlob(encryptionData, encryptedTextFlux);
}));
} catch (GeneralSecurityException e) {
// Key generation / cipher construction failures are not recoverable by the caller.
throw logger.logExceptionAsError(new RuntimeException(e));
}
}
SecretKey generateSecretKey() throws NoSuchAlgorithmException {
    // Produce a fresh 256-bit AES content-encryption key for a single blob upload.
    KeyGenerator generator = KeyGenerator.getInstance(CryptographyConstants.AES);
    generator.init(256);
    return generator.generateKey();
}
/**
* Encrypt the blob and add the encryption metadata to the customer's metadata.
*
* @param plainText The data to encrypt
* @param metadata The customer's metadata to be updated.
* @return A Mono containing the cipher text
*/
private Flux<ByteBuffer> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) {
return this.encryptBlob(plainText).flatMapMany(encryptedBlob -> {
try {
// Side effect: record the serialized encryption envelope in the caller's metadata map so it is
// persisted with the blob and downloads can unwrap the key and decrypt.
metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY,
encryptedBlob.getEncryptionData().toJsonString());
return encryptedBlob.getCiphertextFlux();
} catch (JsonProcessingException e) {
// Serialization failure of the envelope aborts the upload.
throw logger.logExceptionAsError(Exceptions.propagate(e));
}
});
}
/**
* Unsupported. Cannot query data encrypted on client side.
*/
@Override
public Flux<ByteBuffer> query(String expression) {
    // Queries run server-side, but the service only ever sees ciphertext for
    // client-side encrypted blobs, so this operation cannot be supported.
    UnsupportedOperationException failure = new UnsupportedOperationException(
        "Cannot query data encrypted on client side");
    throw logger.logExceptionAsError(failure);
}
/**
* Unsupported. Cannot query data encrypted on client side.
*/
@Override
public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
    // Queries run server-side, but the service only ever sees ciphertext for
    // client-side encrypted blobs, so this operation cannot be supported.
    UnsupportedOperationException failure = new UnsupportedOperationException(
        "Cannot query data encrypted on client side");
    throw logger.logExceptionAsError(failure);
}
} |
We have to do blocking call as change feed processor using synchronous error handling to kill the worker. https://github.com/Azure/azure-sdk-for-java/blob/6fef0a30aeab08cb6a9c3db75a26bfdf2b6b16fc/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/implementation/DefaultObserver.java#L40 | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<byte[]> byteArrayList = jsonNodes.stream()
.map(node -> feedContainer.cosmosSerializerToStream(node))
.collect(Collectors.toList());
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | ).collectList().doOnSuccess(consumer).block(); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
thanks for explanation. Later this consumer may be invoked on the io thread. Does that cause any problem (thread deadlock, etc)? or the fact that you are using publishOn takes care of that? @milismsft is there any other workaround? | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<byte[]> byteArrayList = jsonNodes.stream()
.map(node -> feedContainer.cosmosSerializerToStream(node))
.collect(Collectors.toList());
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | ).collectList().doOnSuccess(consumer).block(); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
@moderakh Yes — for that same concern about blocking on the IO thread, I moved the publisher onto the bounded-elastic scheduler, where the blocking call is safe. That handles this scenario.
this.encryptionConsumer = jsonNodes -> {
List<byte[]> byteArrayList = jsonNodes.stream()
.map(node -> feedContainer.cosmosSerializerToStream(node))
.collect(Collectors.toList());
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | ).collectList().doOnSuccess(consumer).block(); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
A better option is to have the encryption implement its own ChangeFeedObserver class (see https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/ChangeFeedObserver.java). You can also add a property into the ChangeFeedProcessorBuilderImpl to toggle between regular CFP and encrypted CFP when calling the "factory" class to create the respective instances. | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<byte[]> byteArrayList = jsonNodes.stream()
.map(node -> feedContainer.cosmosSerializerToStream(node))
.collect(Collectors.toList());
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | ).collectList().doOnSuccess(consumer).block(); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
ObjectNode objectNode = jsonNode.deepCopy(); https://stackoverflow.com/questions/32713109/how-to-convert-jsonnode-to-objectnode | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList()); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
@milismsft Encryption have to access the ChangeFeedProcessorBuilderImpl which is implementation class in cosmos project, also ChangeFeedProcessorBuilderImpl will have to handle encryption logic, we want cosmos sdk to have minimal encryption logic . IMA if we can avoid cosmos dependency on encryption project and treat encryption project as a wrapper as much as possible that would be good. Coming back to this blocking call, I think ideal solution would have been the async consumer in core sdk , but that would breaking change now. What concern we have with the current approach, and if we have very strong opinion against current design , i am open to other approach as well. | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<byte[]> byteArrayList = jsonNodes.stream()
.map(node -> feedContainer.cosmosSerializerToStream(node))
.collect(Collectors.toList());
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | ).collectList().doOnSuccess(consumer).block(); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
In our case all these are object node , we don't need deepcopy. We will revisit logic once we work on this work item of aggregated queries https://github.com/Azure/azure-sdk-for-java/issues/23160 | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList()); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
Discussed offline, added object node check and throw custom exception if it is not | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList()); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
Discussed offline with @milismsft we are good here from this PR scope, however created work item to add support in change feed processor to support customer observer https://github.com/Azure/azure-sdk-for-java/issues/23738 | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<byte[]> byteArrayList = jsonNodes.stream()
.map(node -> feedContainer.cosmosSerializerToStream(node))
.collect(Collectors.toList());
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> feedContainer.decryptResponse((ObjectNode) jsonNode)).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | ).collectList().doOnSuccess(consumer).block(); | public ChangeFeedEncryptionProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
this.encryptionConsumer = jsonNodes -> {
List<Mono<byte[]>> byteArrayMonoList =
jsonNodes.stream().map(jsonNode -> {
if (jsonNode.isObject()) {
return feedContainer.decryptResponse((ObjectNode) jsonNode);
} else {
throw new IllegalStateException("Current operation not supported in change feed encryption");
}
}).collect(Collectors.toList());
Flux.concat(byteArrayMonoList).publishOn(Schedulers.boundedElastic()).map(
item -> feedContainer.getItemDeserializer().parseFrom(JsonNode.class, item)
).collectList().doOnSuccess(consumer).block();
};
return this;
} | class to build a encryption supported {@link ChangeFeedProcessor} | class to build a encryption supported {@link ChangeFeedProcessor} |
Do we need to use set() for `ArrayNode`s? | JsonWebKey transformSecretKey(SecretKey secretKey) throws JsonProcessingException {
ObjectNode rootNode = MAPPER.createObjectNode();
ArrayNode a = MAPPER.createArrayNode();
a.add(KeyOperation.WRAP_KEY.toString());
a.add(KeyOperation.UNWRAP_KEY.toString());
a.add(KeyOperation.ENCRYPT.toString());
a.add(KeyOperation.DECRYPT.toString());
rootNode.put("k", Base64.getUrlDecoder().decode(secretKey.getValue()));
rootNode.put("kid", this.keyId);
rootNode.put("kty", KeyType.OCT.toString());
rootNode.set("key_ops", a);
return MAPPER.treeToValue(rootNode, JsonWebKey.class);
} | rootNode.set("key_ops", a); | JsonWebKey transformSecretKey(SecretKey secretKey) throws JsonProcessingException {
ObjectNode rootNode = MAPPER.createObjectNode();
ArrayNode a = MAPPER.createArrayNode();
a.add(KeyOperation.WRAP_KEY.toString());
a.add(KeyOperation.UNWRAP_KEY.toString());
a.add(KeyOperation.ENCRYPT.toString());
a.add(KeyOperation.DECRYPT.toString());
rootNode.put("k", Base64.getUrlDecoder().decode(secretKey.getValue()));
rootNode.put("kid", this.keyId);
rootNode.put("kty", KeyType.OCT.toString());
rootNode.set("key_ops", a);
return MAPPER.treeToValue(rootNode, JsonWebKey.class);
} | class CryptographyServiceClient {
private static final ObjectMapper MAPPER = new ObjectMapper();
final String apiVersion;
static final String ACCEPT_LANGUAGE = "en-US";
static final String CONTENT_TYPE_HEADER_VALUE = "application/json";
private final ClientLogger logger = new ClientLogger(CryptographyServiceClient.class);
private final CryptographyService service;
private String vaultUrl;
private String version;
private String keyName;
private final String keyId;
CryptographyServiceClient(String keyId, CryptographyService service, CryptographyServiceVersion serviceVersion) {
Objects.requireNonNull(keyId);
unpackId(keyId);
this.keyId = keyId;
this.service = service;
apiVersion = serviceVersion.getVersion();
}
Mono<Response<KeyVaultKey>> getKey(Context context) {
if (version == null) {
version = "";
}
return getKey(keyName, version, context);
}
private Mono<Response<KeyVaultKey>> getKey(String name, String version, Context context) {
context = context == null ? Context.NONE : context;
return service.getKey(vaultUrl, name, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", name))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", name, error));
}
Mono<Response<JsonWebKey>> getSecretKey(Context context) {
return service.getSecret(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", keyName))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", keyName, error))
.flatMap((stringResponse -> {
KeyVaultKey key = null;
try {
return Mono.just(new SimpleResponse<>(stringResponse.getRequest(),
stringResponse.getStatusCode(),
stringResponse.getHeaders(), transformSecretKey(stringResponse.getValue())));
} catch (JsonProcessingException e) {
return Mono.error(e);
}
}));
}
Mono<Response<SecretKey>> setSecretKey(SecretKey secret, Context context) {
context = context == null ? Context.NONE : context;
Objects.requireNonNull(secret, "The Secret input parameter cannot be null.");
SecretRequestParameters parameters = new SecretRequestParameters()
.setValue(secret.getValue())
.setTags(secret.getProperties().getTags())
.setContentType(secret.getProperties().getContentType())
.setSecretAttributes(new SecretRequestAttributes(secret.getProperties()));
return service.setSecret(vaultUrl, secret.getName(), apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Setting secret - {}", secret.getName()))
.doOnSuccess(response -> logger.verbose("Set secret - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to set secret - {}", secret.getName(), error));
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(plaintext);
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = encryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(encryptParameters.getPlainText())
.setIv(encryptParameters.getIv())
.setAdditionalAuthenticatedData(encryptParameters.getAdditionalAuthenticatedData());
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(ciphertext);
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = decryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(decryptParameters.getCipherText())
.setIv(decryptParameters.getIv())
.setAdditionalAuthenticatedData(decryptParameters.getAdditionalAuthenticatedData())
.setAuthenticationTag(decryptParameters.getAuthenticationTag());
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
KeySignRequest parameters = new KeySignRequest().setAlgorithm(algorithm).setValue(digest);
context = context == null ? Context.NONE : context;
return service.sign(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Signing content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved signed content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to sign content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new SignResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
KeyVerifyRequest parameters =
new KeyVerifyRequest().setAlgorithm(algorithm).setDigest(digest).setSignature(signature);
context = context == null ? Context.NONE : context;
return service.verify(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Verifying content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved verified content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new VerifyResult(response.getValue().getValue(), algorithm, keyId)));
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(key);
context = context == null ? Context.NONE : context;
return service.wrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Wrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved wrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new WrapResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(encryptedKey);
context = context == null ? Context.NONE : context;
return service.unwrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Unwrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved unwrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to unwrap key content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new UnwrapResult(response.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return sign(algorithm, digest, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return verify(algorithm, digest, signature, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
private void unpackId(String keyId) {
if (keyId != null && keyId.length() > 0) {
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
this.vaultUrl = url.getProtocol() + ":
this.keyName = (tokens.length >= 3 ? tokens[2] : null);
this.version = (tokens.length >= 4 ? tokens[3] : null);
} catch (MalformedURLException e) {
e.printStackTrace();
}
}
}
} | class CryptographyServiceClient {
private static final ObjectMapper MAPPER = new ObjectMapper();
final String apiVersion;
static final String ACCEPT_LANGUAGE = "en-US";
static final String CONTENT_TYPE_HEADER_VALUE = "application/json";
private final ClientLogger logger = new ClientLogger(CryptographyServiceClient.class);
private final CryptographyService service;
private String vaultUrl;
private String version;
private String keyName;
private final String keyId;
CryptographyServiceClient(String keyId, CryptographyService service, CryptographyServiceVersion serviceVersion) {
Objects.requireNonNull(keyId);
unpackId(keyId);
this.keyId = keyId;
this.service = service;
apiVersion = serviceVersion.getVersion();
}
Mono<Response<KeyVaultKey>> getKey(Context context) {
if (version == null) {
version = "";
}
return getKey(keyName, version, context);
}
private Mono<Response<KeyVaultKey>> getKey(String name, String version, Context context) {
context = context == null ? Context.NONE : context;
return service.getKey(vaultUrl, name, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", name))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", name, error));
}
Mono<Response<JsonWebKey>> getSecretKey(Context context) {
return service.getSecret(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", keyName))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", keyName, error))
.flatMap((stringResponse -> {
KeyVaultKey key = null;
try {
return Mono.just(new SimpleResponse<>(stringResponse.getRequest(),
stringResponse.getStatusCode(),
stringResponse.getHeaders(), transformSecretKey(stringResponse.getValue())));
} catch (JsonProcessingException e) {
return Mono.error(e);
}
}));
}
Mono<Response<SecretKey>> setSecretKey(SecretKey secret, Context context) {
context = context == null ? Context.NONE : context;
Objects.requireNonNull(secret, "The Secret input parameter cannot be null.");
SecretRequestParameters parameters = new SecretRequestParameters()
.setValue(secret.getValue())
.setTags(secret.getProperties().getTags())
.setContentType(secret.getProperties().getContentType())
.setSecretAttributes(new SecretRequestAttributes(secret.getProperties()));
return service.setSecret(vaultUrl, secret.getName(), apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Setting secret - {}", secret.getName()))
.doOnSuccess(response -> logger.verbose("Set secret - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to set secret - {}", secret.getName(), error));
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(plaintext);
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = encryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(encryptParameters.getPlainText())
.setIv(encryptParameters.getIv())
.setAdditionalAuthenticatedData(encryptParameters.getAdditionalAuthenticatedData());
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(ciphertext);
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = decryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(decryptParameters.getCipherText())
.setIv(decryptParameters.getIv())
.setAdditionalAuthenticatedData(decryptParameters.getAdditionalAuthenticatedData())
.setAuthenticationTag(decryptParameters.getAuthenticationTag());
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
KeySignRequest parameters = new KeySignRequest().setAlgorithm(algorithm).setValue(digest);
context = context == null ? Context.NONE : context;
return service.sign(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Signing content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved signed content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to sign content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new SignResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
KeyVerifyRequest parameters =
new KeyVerifyRequest().setAlgorithm(algorithm).setDigest(digest).setSignature(signature);
context = context == null ? Context.NONE : context;
return service.verify(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Verifying content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved verified content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new VerifyResult(response.getValue().getValue(), algorithm, keyId)));
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(key);
context = context == null ? Context.NONE : context;
return service.wrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Wrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved wrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new WrapResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(encryptedKey);
context = context == null ? Context.NONE : context;
return service.unwrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Unwrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved unwrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to unwrap key content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new UnwrapResult(response.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return sign(algorithm, digest, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return verify(algorithm, digest, signature, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
private void unpackId(String keyId) {
if (keyId != null && keyId.length() > 0) {
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
this.vaultUrl = url.getProtocol() + ":
this.keyName = (tokens.length >= 3 ? tokens[2] : null);
this.version = (tokens.length >= 4 ? tokens[3] : null);
} catch (MalformedURLException e) {
e.printStackTrace();
}
}
}
} |
`put` is deprecated for a named `JsonNode`, `set` should be used instead | JsonWebKey transformSecretKey(SecretKey secretKey) throws JsonProcessingException {
ObjectNode rootNode = MAPPER.createObjectNode();
ArrayNode a = MAPPER.createArrayNode();
a.add(KeyOperation.WRAP_KEY.toString());
a.add(KeyOperation.UNWRAP_KEY.toString());
a.add(KeyOperation.ENCRYPT.toString());
a.add(KeyOperation.DECRYPT.toString());
rootNode.put("k", Base64.getUrlDecoder().decode(secretKey.getValue()));
rootNode.put("kid", this.keyId);
rootNode.put("kty", KeyType.OCT.toString());
rootNode.set("key_ops", a);
return MAPPER.treeToValue(rootNode, JsonWebKey.class);
} | rootNode.set("key_ops", a); | JsonWebKey transformSecretKey(SecretKey secretKey) throws JsonProcessingException {
ObjectNode rootNode = MAPPER.createObjectNode();
ArrayNode a = MAPPER.createArrayNode();
a.add(KeyOperation.WRAP_KEY.toString());
a.add(KeyOperation.UNWRAP_KEY.toString());
a.add(KeyOperation.ENCRYPT.toString());
a.add(KeyOperation.DECRYPT.toString());
rootNode.put("k", Base64.getUrlDecoder().decode(secretKey.getValue()));
rootNode.put("kid", this.keyId);
rootNode.put("kty", KeyType.OCT.toString());
rootNode.set("key_ops", a);
return MAPPER.treeToValue(rootNode, JsonWebKey.class);
} | class CryptographyServiceClient {
private static final ObjectMapper MAPPER = new ObjectMapper();
final String apiVersion;
static final String ACCEPT_LANGUAGE = "en-US";
static final String CONTENT_TYPE_HEADER_VALUE = "application/json";
private final ClientLogger logger = new ClientLogger(CryptographyServiceClient.class);
private final CryptographyService service;
private String vaultUrl;
private String version;
private String keyName;
private final String keyId;
CryptographyServiceClient(String keyId, CryptographyService service, CryptographyServiceVersion serviceVersion) {
Objects.requireNonNull(keyId);
unpackId(keyId);
this.keyId = keyId;
this.service = service;
apiVersion = serviceVersion.getVersion();
}
Mono<Response<KeyVaultKey>> getKey(Context context) {
if (version == null) {
version = "";
}
return getKey(keyName, version, context);
}
private Mono<Response<KeyVaultKey>> getKey(String name, String version, Context context) {
context = context == null ? Context.NONE : context;
return service.getKey(vaultUrl, name, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", name))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", name, error));
}
Mono<Response<JsonWebKey>> getSecretKey(Context context) {
return service.getSecret(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", keyName))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", keyName, error))
.flatMap((stringResponse -> {
KeyVaultKey key = null;
try {
return Mono.just(new SimpleResponse<>(stringResponse.getRequest(),
stringResponse.getStatusCode(),
stringResponse.getHeaders(), transformSecretKey(stringResponse.getValue())));
} catch (JsonProcessingException e) {
return Mono.error(e);
}
}));
}
Mono<Response<SecretKey>> setSecretKey(SecretKey secret, Context context) {
context = context == null ? Context.NONE : context;
Objects.requireNonNull(secret, "The Secret input parameter cannot be null.");
SecretRequestParameters parameters = new SecretRequestParameters()
.setValue(secret.getValue())
.setTags(secret.getProperties().getTags())
.setContentType(secret.getProperties().getContentType())
.setSecretAttributes(new SecretRequestAttributes(secret.getProperties()));
return service.setSecret(vaultUrl, secret.getName(), apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Setting secret - {}", secret.getName()))
.doOnSuccess(response -> logger.verbose("Set secret - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to set secret - {}", secret.getName(), error));
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(plaintext);
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = encryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(encryptParameters.getPlainText())
.setIv(encryptParameters.getIv())
.setAdditionalAuthenticatedData(encryptParameters.getAdditionalAuthenticatedData());
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(ciphertext);
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = decryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(decryptParameters.getCipherText())
.setIv(decryptParameters.getIv())
.setAdditionalAuthenticatedData(decryptParameters.getAdditionalAuthenticatedData())
.setAuthenticationTag(decryptParameters.getAuthenticationTag());
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
KeySignRequest parameters = new KeySignRequest().setAlgorithm(algorithm).setValue(digest);
context = context == null ? Context.NONE : context;
return service.sign(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Signing content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved signed content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to sign content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new SignResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
KeyVerifyRequest parameters =
new KeyVerifyRequest().setAlgorithm(algorithm).setDigest(digest).setSignature(signature);
context = context == null ? Context.NONE : context;
return service.verify(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Verifying content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved verified content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new VerifyResult(response.getValue().getValue(), algorithm, keyId)));
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(key);
context = context == null ? Context.NONE : context;
return service.wrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Wrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved wrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new WrapResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(encryptedKey);
context = context == null ? Context.NONE : context;
return service.unwrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Unwrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved unwrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to unwrap key content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new UnwrapResult(response.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return sign(algorithm, digest, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return verify(algorithm, digest, signature, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
private void unpackId(String keyId) {
if (keyId != null && keyId.length() > 0) {
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
this.vaultUrl = url.getProtocol() + ":
this.keyName = (tokens.length >= 3 ? tokens[2] : null);
this.version = (tokens.length >= 4 ? tokens[3] : null);
} catch (MalformedURLException e) {
e.printStackTrace();
}
}
}
} | class CryptographyServiceClient {
private static final ObjectMapper MAPPER = new ObjectMapper();
final String apiVersion;
static final String ACCEPT_LANGUAGE = "en-US";
static final String CONTENT_TYPE_HEADER_VALUE = "application/json";
private final ClientLogger logger = new ClientLogger(CryptographyServiceClient.class);
private final CryptographyService service;
private String vaultUrl;
private String version;
private String keyName;
private final String keyId;
CryptographyServiceClient(String keyId, CryptographyService service, CryptographyServiceVersion serviceVersion) {
Objects.requireNonNull(keyId);
unpackId(keyId);
this.keyId = keyId;
this.service = service;
apiVersion = serviceVersion.getVersion();
}
Mono<Response<KeyVaultKey>> getKey(Context context) {
if (version == null) {
version = "";
}
return getKey(keyName, version, context);
}
private Mono<Response<KeyVaultKey>> getKey(String name, String version, Context context) {
context = context == null ? Context.NONE : context;
return service.getKey(vaultUrl, name, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", name))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", name, error));
}
Mono<Response<JsonWebKey>> getSecretKey(Context context) {
return service.getSecret(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE,
context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Retrieving key - {}", keyName))
.doOnSuccess(response -> logger.verbose("Retrieved key - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to get key - {}", keyName, error))
.flatMap((stringResponse -> {
KeyVaultKey key = null;
try {
return Mono.just(new SimpleResponse<>(stringResponse.getRequest(),
stringResponse.getStatusCode(),
stringResponse.getHeaders(), transformSecretKey(stringResponse.getValue())));
} catch (JsonProcessingException e) {
return Mono.error(e);
}
}));
}
Mono<Response<SecretKey>> setSecretKey(SecretKey secret, Context context) {
context = context == null ? Context.NONE : context;
Objects.requireNonNull(secret, "The Secret input parameter cannot be null.");
SecretRequestParameters parameters = new SecretRequestParameters()
.setValue(secret.getValue())
.setTags(secret.getProperties().getTags())
.setContentType(secret.getProperties().getContentType())
.setSecretAttributes(new SecretRequestAttributes(secret.getProperties()));
return service.setSecret(vaultUrl, secret.getName(), apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Setting secret - {}", secret.getName()))
.doOnSuccess(response -> logger.verbose("Set secret - {}", response.getValue().getName()))
.doOnError(error -> logger.warning("Failed to set secret - {}", secret.getName(), error));
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(plaintext);
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = encryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(encryptParameters.getPlainText())
.setIv(encryptParameters.getIv())
.setAdditionalAuthenticatedData(encryptParameters.getAdditionalAuthenticatedData());
context = context == null ? Context.NONE : context;
return service.encrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Encrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved encrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to encrypt content with algorithm - {}", algorithm, error))
.map(keyOperationResultResponse -> {
KeyOperationResult keyOperationResult = keyOperationResultResponse.getValue();
return new EncryptResult(keyOperationResult.getResult(), algorithm, keyId,
keyOperationResult.getIv(), keyOperationResult.getAuthenticationTag(),
keyOperationResult.getAdditionalAuthenticatedData());
});
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(ciphertext);
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
EncryptionAlgorithm algorithm = decryptParameters.getAlgorithm();
KeyOperationParameters parameters = new KeyOperationParameters()
.setAlgorithm(algorithm)
.setValue(decryptParameters.getCipherText())
.setIv(decryptParameters.getIv())
.setAdditionalAuthenticatedData(decryptParameters.getAdditionalAuthenticatedData())
.setAuthenticationTag(decryptParameters.getAuthenticationTag());
context = context == null ? Context.NONE : context;
return service.decrypt(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Decrypting content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved decrypted content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to decrypt content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse -> Mono.just(
new DecryptResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
KeySignRequest parameters = new KeySignRequest().setAlgorithm(algorithm).setValue(digest);
context = context == null ? Context.NONE : context;
return service.sign(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Signing content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved signed content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to sign content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new SignResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
KeyVerifyRequest parameters =
new KeyVerifyRequest().setAlgorithm(algorithm).setDigest(digest).setSignature(signature);
context = context == null ? Context.NONE : context;
return service.verify(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Verifying content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved verified content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new VerifyResult(response.getValue().getValue(), algorithm, keyId)));
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(key);
context = context == null ? Context.NONE : context;
return service.wrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Wrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved wrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to verify content with algorithm - {}", algorithm, error))
.flatMap(keyOperationResultResponse ->
Mono.just(new WrapResult(keyOperationResultResponse.getValue().getResult(), algorithm, keyId)));
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
KeyWrapUnwrapRequest parameters = new KeyWrapUnwrapRequest()
.setAlgorithm(algorithm)
.setValue(encryptedKey);
context = context == null ? Context.NONE : context;
return service.unwrapKey(vaultUrl, keyName, version, apiVersion, ACCEPT_LANGUAGE, parameters,
CONTENT_TYPE_HEADER_VALUE, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
.doOnRequest(ignored -> logger.verbose("Unwrapping key content with algorithm - {}", algorithm))
.doOnSuccess(response -> logger.verbose("Retrieved unwrapped key content with algorithm - {}", algorithm))
.doOnError(error -> logger.warning("Failed to unwrap key content with algorithm - {}", algorithm, error))
.flatMap(response -> Mono.just(new UnwrapResult(response.getValue().getResult(), algorithm, keyId)));
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return sign(algorithm, digest, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
try {
HashAlgorithm hashAlgorithm = SignatureHashResolver.DEFAULT.get(algorithm);
MessageDigest md = MessageDigest.getInstance(hashAlgorithm.toString());
md.update(data);
byte[] digest = md.digest();
return verify(algorithm, digest, signature, context);
} catch (NoSuchAlgorithmException e) {
return Mono.error(e);
}
}
private void unpackId(String keyId) {
if (keyId != null && keyId.length() > 0) {
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
this.vaultUrl = url.getProtocol() + ":
this.keyName = (tokens.length >= 3 ? tokens[2] : null);
this.version = (tokens.length >= 4 ? tokens[3] : null);
} catch (MalformedURLException e) {
e.printStackTrace();
}
}
}
} |
We should check StorageAccountManager bean here and for below test. | public void testEventHubOperationProvidedNotStorageUnderMSI() {
this.contextRunner.withUserConfiguration(
TestConfigWithAzureResourceManagerAndConnectionProvider.class,
AzureEventHubAutoConfiguration.class)
.withPropertyValues(
AZURE_PROPERTY_PREFIX + "resource-group=rg1",
AZURE_PROPERTY_PREFIX + "msi-enabled=true",
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1",
AZURE_PROPERTY_PREFIX + "subscription-id=sub"
)
.run(context -> {
assertThat(context).hasSingleBean(EventHubNamespaceManager.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
});
} | assertThat(context).hasSingleBean(EventHubNamespaceManager.class); | public void testEventHubOperationProvidedNotStorageUnderMSI() {
this.contextRunner.withUserConfiguration(
TestConfigWithAzureResourceManagerAndConnectionProvider.class,
AzureEventHubAutoConfiguration.class)
.withPropertyValues(
AZURE_PROPERTY_PREFIX + "resource-group=rg1",
AZURE_PROPERTY_PREFIX + "msi-enabled=true",
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1",
AZURE_PROPERTY_PREFIX + "subscription-id=sub"
)
.run(context -> {
assertThat(context).hasSingleBean(EventHubNamespaceManager.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
assertThat(context).doesNotHaveBean(StorageAccountManager.class);
});
} | class AzureEventHubAutoConfigurationTest {
private static final String EVENT_HUB_PROPERTY_PREFIX = "spring.cloud.azure.eventhub.";
private static final String AZURE_PROPERTY_PREFIX = "spring.cloud.azure.";
private ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureEventHubAutoConfiguration.class));
@Test
public void testAzureEventHubDisabled() {
this.contextRunner.withPropertyValues(EVENT_HUB_PROPERTY_PREFIX + "enabled=false")
.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubProperties.class));
}
@Test
public void testWithoutEventHubClient() {
this.contextRunner.withClassLoader(new FilteredClassLoader(EventHubConsumerAsyncClient.class))
.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubProperties.class));
}
@Test
public void testAzureEventHubPropertiesStorageAccountIllegal() {
this.contextRunner.withPropertyValues(EVENT_HUB_PROPERTY_PREFIX + "checkpoint-storage-account=1")
.run(context -> assertThrows(IllegalStateException.class,
() -> context.getBean(AzureEventHubProperties.class)));
}
@Test
public void testAzureEventHubPropertiesConfigured() {
this.contextRunner.withPropertyValues(
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1",
EVENT_HUB_PROPERTY_PREFIX + "checkpoint-storage-account=sa1",
EVENT_HUB_PROPERTY_PREFIX + "connection-string=str1")
.run(context -> {
assertThat(context).hasSingleBean(AzureEventHubProperties.class);
assertThat(context.getBean(AzureEventHubProperties.class).getNamespace()).isEqualTo(
"ns1");
assertThat(context.getBean(AzureEventHubProperties.class).getConnectionString()).isEqualTo("str1");
assertThat(context.getBean(AzureEventHubProperties.class).getCheckpointStorageAccount()).isEqualTo("sa1");
});
}
@Test
public void testConnectionStringProvided() {
this.contextRunner.withPropertyValues(EVENT_HUB_PROPERTY_PREFIX + "connection-string=str1")
.run(context -> {
assertThat(context.getBean(EventHubConnectionStringProvider.class).getConnectionString()).isEqualTo("str1");
assertThat(context).hasSingleBean(EventHubClientFactory.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
assertThat(context).doesNotHaveBean(EventHubNamespaceManager.class);
assertThat(context).doesNotHaveBean(StorageAccountManager.class);
});
}
@Test
public void testResourceManagerProvided() {
this.contextRunner.withUserConfiguration(
TestConfigWithAzureResourceManagerAndConnectionProvider.class,
AzureEventHubAutoConfiguration.class)
.withPropertyValues(
AZURE_PROPERTY_PREFIX + "resource-group=rg1",
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1",
EVENT_HUB_PROPERTY_PREFIX + "checkpoint-storage-account=sa1"
)
.run(context -> {
assertThat(context).hasSingleBean(EventHubClientFactory.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
assertThat(context).hasSingleBean(EventHubNamespaceManager.class);
assertThat(context).hasSingleBean(StorageAccountManager.class);
});
}
@Test
public void testEventHubOperationProvidedNotStorageUnder () {
this.contextRunner.withUserConfiguration(
TestConfigWithAzureResourceManagerAndConnectionProvider.class,
AzureEventHubAutoConfiguration.class)
.withPropertyValues(
AZURE_PROPERTY_PREFIX + "resource-group=rg1",
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1"
)
.run(context -> {
assertThat(context).hasSingleBean(EventHubNamespaceManager.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
});
}
@Test
@Configuration
@EnableConfigurationProperties(AzureProperties.class)
public static class TestConfigWithAzureResourceManagerAndConnectionProvider {
@Bean
public AzureResourceManager azureResourceManager() {
final AzureResourceManager mockResourceManager = mock(AzureResourceManager.class);
final StorageManager mockStorageManager = mock(StorageManager.class);
final StorageAccounts mockStorageAccounts = mock(StorageAccounts.class);
final StorageAccount mockStorageAccount = mock(StorageAccount.class);
final List<StorageAccountKey> mockStorageAccountKeys = singletonList(mock(StorageAccountKey.class));
when(mockResourceManager.storageAccounts()).thenReturn(mockStorageAccounts);
when(mockStorageAccounts.getByResourceGroup(anyString(), anyString())).thenReturn(mockStorageAccount);
when(mockStorageAccount.getKeys()).thenReturn(mockStorageAccountKeys);
when(mockStorageAccount.manager()).thenReturn(mockStorageManager);
when(mockStorageManager.environment()).thenReturn(AzureEnvironment.AZURE);
return mockResourceManager;
}
@Bean
public EventHubConnectionStringProvider eventHubConnectionStringProvider() {
return new EventHubConnectionStringProvider("fake-string");
}
}
} | class AzureEventHubAutoConfigurationTest {
private static final String EVENT_HUB_PROPERTY_PREFIX = "spring.cloud.azure.eventhub.";
private static final String AZURE_PROPERTY_PREFIX = "spring.cloud.azure.";
private ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureEventHubAutoConfiguration.class));
@Test
public void testAzureEventHubDisabled() {
this.contextRunner.withPropertyValues(EVENT_HUB_PROPERTY_PREFIX + "enabled=false")
.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubProperties.class));
}
@Test
public void testWithoutEventHubClient() {
this.contextRunner.withClassLoader(new FilteredClassLoader(EventHubConsumerAsyncClient.class))
.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubProperties.class));
}
@Test
public void testAzureEventHubPropertiesStorageAccountIllegal() {
this.contextRunner.withPropertyValues(EVENT_HUB_PROPERTY_PREFIX + "checkpoint-storage-account=1")
.run(context -> assertThrows(IllegalStateException.class,
() -> context.getBean(AzureEventHubProperties.class)));
}
@Test
public void testAzureEventHubPropertiesConfigured() {
this.contextRunner.withPropertyValues(
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1",
EVENT_HUB_PROPERTY_PREFIX + "checkpoint-storage-account=sa1",
EVENT_HUB_PROPERTY_PREFIX + "connection-string=str1")
.run(context -> {
assertThat(context).hasSingleBean(AzureEventHubProperties.class);
assertThat(context.getBean(AzureEventHubProperties.class).getNamespace()).isEqualTo(
"ns1");
assertThat(context.getBean(AzureEventHubProperties.class).getConnectionString()).isEqualTo("str1");
assertThat(context.getBean(AzureEventHubProperties.class).getCheckpointStorageAccount()).isEqualTo("sa1");
});
}
@Test
public void testConnectionStringProvided() {
this.contextRunner.withPropertyValues(EVENT_HUB_PROPERTY_PREFIX + "connection-string=str1")
.run(context -> {
assertThat(context.getBean(EventHubConnectionStringProvider.class).getConnectionString()).isEqualTo("str1");
assertThat(context).hasSingleBean(EventHubClientFactory.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
assertThat(context).doesNotHaveBean(EventHubNamespaceManager.class);
assertThat(context).doesNotHaveBean(StorageAccountManager.class);
});
}
@Test
public void testResourceManagerProvided() {
this.contextRunner.withUserConfiguration(
TestConfigWithAzureResourceManagerAndConnectionProvider.class,
AzureEventHubAutoConfiguration.class)
.withPropertyValues(
AZURE_PROPERTY_PREFIX + "resource-group=rg1",
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1",
EVENT_HUB_PROPERTY_PREFIX + "checkpoint-storage-account=sa1"
)
.run(context -> {
assertThat(context).hasSingleBean(EventHubClientFactory.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
assertThat(context).hasSingleBean(EventHubNamespaceManager.class);
assertThat(context).hasSingleBean(StorageAccountManager.class);
});
}
@Test
public void testEventHubOperationProvidedNotStorageUnderSP() {
this.contextRunner.withUserConfiguration(
TestConfigWithAzureResourceManagerAndConnectionProvider.class,
AzureEventHubAutoConfiguration.class)
.withPropertyValues(
AZURE_PROPERTY_PREFIX + "resource-group=rg1",
EVENT_HUB_PROPERTY_PREFIX + "namespace=ns1"
)
.run(context -> {
assertThat(context).hasSingleBean(EventHubNamespaceManager.class);
assertThat(context).hasSingleBean(EventHubOperation.class);
assertThat(context).doesNotHaveBean(StorageAccountManager.class);
});
}
@Test
@Configuration
@EnableConfigurationProperties(AzureProperties.class)
public static class TestConfigWithAzureResourceManagerAndConnectionProvider {
@Bean
public AzureResourceManager azureResourceManager() {
final AzureResourceManager mockResourceManager = mock(AzureResourceManager.class);
final StorageManager mockStorageManager = mock(StorageManager.class);
final StorageAccounts mockStorageAccounts = mock(StorageAccounts.class);
final StorageAccount mockStorageAccount = mock(StorageAccount.class);
final List<StorageAccountKey> mockStorageAccountKeys = singletonList(mock(StorageAccountKey.class));
when(mockResourceManager.storageAccounts()).thenReturn(mockStorageAccounts);
when(mockStorageAccounts.getByResourceGroup(anyString(), anyString())).thenReturn(mockStorageAccount);
when(mockStorageAccount.getKeys()).thenReturn(mockStorageAccountKeys);
when(mockStorageAccount.manager()).thenReturn(mockStorageManager);
when(mockStorageManager.environment()).thenReturn(AzureEnvironment.AZURE);
return mockResourceManager;
}
@Bean
public EventHubConnectionStringProvider eventHubConnectionStringProvider() {
return new EventHubConnectionStringProvider("fake-string");
}
}
} |
``` private static final List<String> SUPPORTED_URI_PREFIX = Arrays.asList("foo", "bar", ...); ``` | private void validateUri() {
if (StringUtils.startsWithIgnoreCase(uri, "mongodb:
throw new IllegalArgumentException("'azure.cosmos.uri' does not support mongodb, to work with mongodb, please use spring-data-mongodb instead.");
}
} | if (StringUtils.startsWithIgnoreCase(uri, "mongodb: | private void validateUri() {
if (!Pattern.matches(URI_REGEX, uri)) {
throw new IllegalArgumentException("the uri's pattern specified in 'azure.cosmos.uri' is not supported, "
+ "only sql/core api is supported, please check https:
+ "for more info.");
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
public static final String URI_REGEX = "http[s]{0,1}:
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} |
How about using `allow list` instead of `blocking list`? | private void validateUri() {
if (StringUtils.startsWithIgnoreCase(uri, "mongodb:
throw new IllegalArgumentException("'azure.cosmos.uri' does not support mongodb, to work with mongodb, please use spring-data-mongodb instead.");
}
} | if (StringUtils.startsWithIgnoreCase(uri, "mongodb: | private void validateUri() {
if (!Pattern.matches(URI_REGEX, uri)) {
throw new IllegalArgumentException("the uri's pattern specified in 'azure.cosmos.uri' is not supported, "
+ "only sql/core api is supported, please check https:
+ "for more info.");
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
public static final String URI_REGEX = "http[s]{0,1}:
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} |
which blocking list? I don't understand you. | private void validateUri() {
if (StringUtils.startsWithIgnoreCase(uri, "mongodb:
throw new IllegalArgumentException("'azure.cosmos.uri' does not support mongodb, to work with mongodb, please use spring-data-mongodb instead.");
}
} | if (StringUtils.startsWithIgnoreCase(uri, "mongodb: | private void validateUri() {
if (!Pattern.matches(URI_REGEX, uri)) {
throw new IllegalArgumentException("the uri's pattern specified in 'azure.cosmos.uri' is not supported, "
+ "only sql/core api is supported, please check https:
+ "for more info.");
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
public static final String URI_REGEX = "http[s]{0,1}:
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} |
Your current implementation is blocking `mongodb://`. | private void validateUri() {
if (StringUtils.startsWithIgnoreCase(uri, "mongodb:
throw new IllegalArgumentException("'azure.cosmos.uri' does not support mongodb, to work with mongodb, please use spring-data-mongodb instead.");
}
} | if (StringUtils.startsWithIgnoreCase(uri, "mongodb: | private void validateUri() {
if (!Pattern.matches(URI_REGEX, uri)) {
throw new IllegalArgumentException("the uri's pattern specified in 'azure.cosmos.uri' is not supported, "
+ "only sql/core api is supported, please check https:
+ "for more info.");
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
public static final String URI_REGEX = "http[s]{0,1}:
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} |
can you give me an example?😂 | private void validateUri() {
if (StringUtils.startsWithIgnoreCase(uri, "mongodb:
throw new IllegalArgumentException("'azure.cosmos.uri' does not support mongodb, to work with mongodb, please use spring-data-mongodb instead.");
}
} | if (StringUtils.startsWithIgnoreCase(uri, "mongodb: | private void validateUri() {
if (!Pattern.matches(URI_REGEX, uri)) {
throw new IllegalArgumentException("the uri's pattern specified in 'azure.cosmos.uri' is not supported, "
+ "only sql/core api is supported, please check https:
+ "for more info.");
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} | class CosmosProperties implements InitializingBean {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosProperties.class);
public static final String URI_REGEX = "http[s]{0,1}:
/**
* Document DB URI.
*/
@NotEmpty
private String uri;
/**
* Document DB key.
*/
@NotEmpty
private String key;
/**
* Document DB consistency level.
*/
private ConsistencyLevel consistencyLevel;
/**
* Document DB database name.
*/
@NotEmpty
private String database;
/**
* Populate Diagnostics Strings and Query metrics
*/
private boolean populateQueryMetrics;
/**
* Whether allow Microsoft to collect telemetry data.
*/
private boolean allowTelemetry = true;
/**
* Represents the connection mode to be used by the client in the Azure Cosmos DB database service.
*/
private ConnectionMode connectionMode;
/**
* Response Diagnostics processor
* Default implementation is to log the response diagnostics string
*/
private ResponseDiagnosticsProcessor responseDiagnosticsProcessor =
responseDiagnostics -> {
if (populateQueryMetrics) {
LOGGER.info("Response Diagnostics {}", responseDiagnostics);
}
};
@Override
public void afterPropertiesSet() {
validateUri();
}
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getDatabase() {
return database;
}
public void setDatabase(String databaseName) {
this.database = databaseName;
}
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
public void setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "Deprecate the telemetry endpoint and use HTTP header User Agent instead.")
public boolean isAllowTelemetry() {
return allowTelemetry;
}
public void setAllowTelemetry(boolean allowTelemetry) {
this.allowTelemetry = allowTelemetry;
}
public boolean isPopulateQueryMetrics() {
return populateQueryMetrics;
}
public void setPopulateQueryMetrics(boolean populateQueryMetrics) {
this.populateQueryMetrics = populateQueryMetrics;
}
public ResponseDiagnosticsProcessor getResponseDiagnosticsProcessor() {
return responseDiagnosticsProcessor;
}
public void setResponseDiagnosticsProcessor(ResponseDiagnosticsProcessor responseDiagnosticsProcessor) {
this.responseDiagnosticsProcessor = responseDiagnosticsProcessor;
}
public ConnectionMode getConnectionMode() {
return connectionMode;
}
public void setConnectionMode(ConnectionMode connectionMode) {
this.connectionMode = connectionMode;
}
} |
I'd still set `applicationId` if the passed value is null. It'll allow for resetting of the `applicationId`. | public ClientOptions setApplicationId(String applicationId) {
if (!CoreUtils.isNullOrEmpty(applicationId)) {
if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
throw logger.logExceptionAsError(new IllegalArgumentException(INVALID_APPLICATION_ID_LENGTH));
} else if (applicationId.contains(" ")) {
throw logger.logExceptionAsError(new IllegalArgumentException(INVALID_APPLICATION_ID_SPACE));
} else {
this.applicationId = applicationId;
}
}
return this;
} | this.applicationId = applicationId; | public ClientOptions setApplicationId(String applicationId) {
if (!CoreUtils.isNullOrEmpty(applicationId)) {
if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
throw logger.logExceptionAsError(new IllegalArgumentException(INVALID_APPLICATION_ID_LENGTH));
} else if (applicationId.contains(" ")) {
throw logger.logExceptionAsError(new IllegalArgumentException(INVALID_APPLICATION_ID_SPACE));
}
}
this.applicationId = applicationId;
return this;
} | class ClientOptions {
private static final int MAX_APPLICATION_ID_LENGTH = 24;
private static final String INVALID_APPLICATION_ID_LENGTH = "'applicationId' length cannot be greater than "
+ MAX_APPLICATION_ID_LENGTH;
private static final String INVALID_APPLICATION_ID_SPACE = "'applicationId' cannot contain spaces.";
private final ClientLogger logger = new ClientLogger(ClientOptions.class);
private Iterable<Header> headers;
private String applicationId;
/**
* Gets the application ID.
*
* @return The application ID.
*/
public String getApplicationId() {
return applicationId;
}
/**
* Sets the application ID.
* <p>
* The {@code applicationId} is used to configure {@link UserAgentPolicy} for telemetry/monitoring purposes.
* <p>
* See <a href="https:
* policy</a> for additional information.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create ClientOptions with application ID 'myApplicationId'</p>
*
* {@codesnippet com.azure.core.util.ClientOptions.setApplicationId
*
* @param applicationId The application ID.
*
* @return The updated ClientOptions object.
*
* @throws IllegalArgumentException If {@code applicationId} contains spaces or larger than 24 in length.
*/
/**
* Sets the {@link Header Headers}.
* <p>
* The passed headers are applied to each request sent with the client.
* <p>
* This overwrites all previously set headers.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create ClientOptions with Header 'myCustomHeader':'myStaticValue'</p>
*
* {@codesnippet com.azure.core.util.ClientOptions.setHeaders
*
* @param headers The headers.
* @return The updated ClientOptions object.
*/
public ClientOptions setHeaders(Iterable<Header> headers) {
this.headers = headers;
return this;
}
/**
* Gets the {@link Header Headers}.
*
* @return The {@link Header Headers}, if headers weren't set previously an empty list is returned.
*/
public Iterable<Header> getHeaders() {
if (headers == null) {
return Collections.emptyList();
}
return headers;
}
} | class ClientOptions {
private static final int MAX_APPLICATION_ID_LENGTH = 24;
private static final String INVALID_APPLICATION_ID_LENGTH = "'applicationId' length cannot be greater than "
+ MAX_APPLICATION_ID_LENGTH;
private static final String INVALID_APPLICATION_ID_SPACE = "'applicationId' cannot contain spaces.";
private final ClientLogger logger = new ClientLogger(ClientOptions.class);
private Iterable<Header> headers;
private String applicationId;
/**
* Gets the application ID.
*
* @return The application ID.
*/
public String getApplicationId() {
return applicationId;
}
/**
* Sets the application ID.
* <p>
* The {@code applicationId} is used to configure {@link UserAgentPolicy} for telemetry/monitoring purposes.
* <p>
* See <a href="https:
* policy</a> for additional information.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create ClientOptions with application ID 'myApplicationId'</p>
*
* {@codesnippet com.azure.core.util.ClientOptions.setApplicationId
*
* @param applicationId The application ID.
*
* @return The updated ClientOptions object.
*
* @throws IllegalArgumentException If {@code applicationId} contains spaces or is larger than 24 characters in
* length.
*/
/**
* Sets the {@link Header Headers}.
* <p>
* The passed headers are applied to each request sent with the client.
* <p>
* This overwrites all previously set headers.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create ClientOptions with Header 'myCustomHeader':'myStaticValue'</p>
*
* {@codesnippet com.azure.core.util.ClientOptions.setHeaders
*
* @param headers The headers.
* @return The updated ClientOptions object.
*/
public ClientOptions setHeaders(Iterable<Header> headers) {
this.headers = headers;
return this;
}
/**
* Gets the {@link Header Headers}.
*
* @return The {@link Header Headers}, if headers weren't set previously an empty list is returned.
*/
public Iterable<Header> getHeaders() {
if (headers == null) {
return Collections.emptyList();
}
return headers;
}
} |
`inferred == configured || inferred == AADApplicationType.WEB_APPLICATION_AND_RESOURCE_SERVER` | private boolean isValidApplicationType(AADApplicationType configured, AADApplicationType inferred) {
return inferred == configured || inferred == AADApplicationType.RESOURCE_SERVER_WITH_OBO;
} | return inferred == configured || inferred == AADApplicationType.RESOURCE_SERVER_WITH_OBO; | private boolean isValidApplicationType(AADApplicationType configured, AADApplicationType inferred) {
return inferred == configured || inferred == AADApplicationType.RESOURCE_SERVER_WITH_OBO;
} | class UserGroupProperties {
private final Log logger = LogFactory.getLog(UserGroupProperties.class);
/**
* Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph
* API Call.
*/
private List<String> allowedGroupNames = new ArrayList<>();
private Set<String> allowedGroupIds = new HashSet<>();
/**
* enableFullList is used to control whether to list all group id, default is false
*/
private Boolean enableFullList = false;
public Set<String> getAllowedGroupIds() {
return allowedGroupIds;
}
/**
* Set the allowed group ids.
*
* @param allowedGroupIds Allowed group ids.
*/
public void setAllowedGroupIds(Set<String> allowedGroupIds) {
this.allowedGroupIds = allowedGroupIds;
}
public List<String> getAllowedGroupNames() {
return allowedGroupNames;
}
public void setAllowedGroupNames(List<String> allowedGroupNames) {
this.allowedGroupNames = allowedGroupNames;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "enable-full-list is not easy to understand.",
replacement = "allowed-group-ids: all")
public Boolean getEnableFullList() {
return enableFullList;
}
@Deprecated
public void setEnableFullList(Boolean enableFullList) {
logger.warn(" 'azure.activedirectory.user-group.enable-full-list' property detected! "
+ "Use 'azure.activedirectory.user-group.allowed-group-ids: all' instead!");
this.enableFullList = enableFullList;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "In order to distinguish between allowed-group-ids and allowed-group-names, set allowed-groups "
+ "deprecated.",
replacement = "azure.activedirectory.user-group.allowed-group-names")
public List<String> getAllowedGroups() {
return allowedGroupNames;
}
@Deprecated
public void setAllowedGroups(List<String> allowedGroups) {
logger.warn(" 'azure.activedirectory.user-group.allowed-groups' property detected! " + " Use 'azure"
+ ".activedirectory.user-group.allowed-group-names' instead!");
this.allowedGroupNames = allowedGroups;
}
} | class UserGroupProperties {
private final Log logger = LogFactory.getLog(UserGroupProperties.class);
/**
* Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph
* API Call.
*/
private List<String> allowedGroupNames = new ArrayList<>();
private Set<String> allowedGroupIds = new HashSet<>();
/**
* enableFullList is used to control whether to list all group id, default is false
*/
private Boolean enableFullList = false;
public Set<String> getAllowedGroupIds() {
return allowedGroupIds;
}
/**
* Set the allowed group ids.
*
* @param allowedGroupIds Allowed group ids.
*/
public void setAllowedGroupIds(Set<String> allowedGroupIds) {
this.allowedGroupIds = allowedGroupIds;
}
public List<String> getAllowedGroupNames() {
return allowedGroupNames;
}
public void setAllowedGroupNames(List<String> allowedGroupNames) {
this.allowedGroupNames = allowedGroupNames;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "enable-full-list is not easy to understand.",
replacement = "allowed-group-ids: all")
public Boolean getEnableFullList() {
return enableFullList;
}
@Deprecated
public void setEnableFullList(Boolean enableFullList) {
logger.warn(" 'azure.activedirectory.user-group.enable-full-list' property detected! "
+ "Use 'azure.activedirectory.user-group.allowed-group-ids: all' instead!");
this.enableFullList = enableFullList;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "In order to distinguish between allowed-group-ids and allowed-group-names, set allowed-groups "
+ "deprecated.",
replacement = "azure.activedirectory.user-group.allowed-group-names")
public List<String> getAllowedGroups() {
return allowedGroupNames;
}
@Deprecated
public void setAllowedGroups(List<String> allowedGroups) {
logger.warn(" 'azure.activedirectory.user-group.allowed-groups' property detected! " + " Use 'azure"
+ ".activedirectory.user-group.allowed-group-names' instead!");
this.allowedGroupNames = allowedGroups;
}
} |
Maybe it should contains `graph`. Only `delegatedByAzure` type will not be contained here. `graph`'s type is `authorizationCode` | public void onDemandGraphClientConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AUTHORIZATION_CODE.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Arrays.asList(azure), clients);
});
} | assertEquals(Arrays.asList(azure), clients); | public void onDemandGraphClientConfiguredTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AUTHORIZATION_CODE.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Arrays.asList(graph, azure), clients);
});
} | class AADClientRegistrationRepositoryTest {
@Test
public void noClientsConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(1, clients.size());
assertEquals(azure, clients.get(0));
});
}
@Test
public void azureClientConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.azure.scopes = Azure.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
public void graphClientConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AZURE_DELEGATED.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Collections.singletonList("Graph.Scope")), graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
@Test
public void clientWithClientCredentialsPermissions() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = fakeValue:/.default",
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials"
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
assertEquals(repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID).getAuthorizationGrantType(),
AuthorizationGrantType.AUTHORIZATION_CODE);
assertEquals(repository.findByRegistrationId("graph").getAuthorizationGrantType(),
AuthorizationGrantType.CLIENT_CREDENTIALS);
});
}
@Test
public void clientWhichIsNotAuthorizationCodeButOnDemandExceptionTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context ->
assertThrows(IllegalStateException.class, () -> context.getBean(AADAuthenticationProperties.class))
);
}
@Test
public void azureClientEndpointTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"https:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/", azure.getRedirectUri());
});
}
@Test
public void customizeUriTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.base-uri = http:
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"http:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
});
}
@Test
public void testNoGroupIdAndGroupNameConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
});
}
@Test
public void testGroupNameConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues("azure.activedirectory.user-group.allowed-group-names = group1, group2")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupIdConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupNameAndGroupIdConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-names = group1, group2",
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.office.scopes = "
+ "https:
"azure.activedirectory.authorization-clients.arm.scopes = "
+ "https:
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
int resourceServerCountInAuthCode = resourceServerCount(azure.getScopes());
assertTrue(resourceServerCountInAuthCode > 1);
int resourceServerCountInAccessToken =
resourceServerCount(repository.getAzureClientAccessTokenScopes());
assertTrue(resourceServerCountInAccessToken != 0);
});
}
@Disabled
@Test
public void noConfigurationOnMissingRequiredProperties() {
WebApplicationContextRunnerUtils
.getContextRunner()
.run(context -> {
assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class);
assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class);
assertThat(context).doesNotHaveBean(OAuth2UserService.class);
});
}
@Test
public void resourceServerCountTest() {
Set<String> scopes = new HashSet<>();
assertEquals(resourceServerCount(scopes), 0);
scopes.add("openid");
scopes.add("profile");
scopes.add("offline_access");
assertEquals(resourceServerCount(scopes), 0);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} | class AADClientRegistrationRepositoryTest {
@Test
public void noClientsConfiguredTest() {
webApplicationContextRunner()
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(1, clients.size());
assertEquals(azure, clients.get(0));
});
}
@Test
public void azureClientConfiguredTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.azure.scopes = Azure.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
public void graphClientConfiguredTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AZURE_DELEGATED.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Collections.singletonList("Graph.Scope")), graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
@Test
public void clientWithClientCredentialsPermissions() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = fakeValue:/.default",
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials"
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
assertEquals(repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID).getAuthorizationGrantType(),
AuthorizationGrantType.AUTHORIZATION_CODE);
assertEquals(repository.findByRegistrationId("graph").getAuthorizationGrantType(),
AuthorizationGrantType.CLIENT_CREDENTIALS);
});
}
@Test
public void clientWhichIsNotAuthorizationCodeButOnDemandExceptionTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context ->
assertThrows(IllegalStateException.class, () -> context.getBean(AADAuthenticationProperties.class))
);
}
@Test
public void azureClientEndpointTest() {
webApplicationContextRunner()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"https:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/", azure.getRedirectUri());
});
}
@Test
public void customizeUriTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.base-uri = http:
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"http:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
});
}
@Test
public void testNoGroupIdAndGroupNameConfigured() {
webApplicationContextRunner()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
});
}
@Test
public void testGroupNameConfigured() {
webApplicationContextRunner()
.withPropertyValues("azure.activedirectory.user-group.allowed-group-names = group1, group2")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupIdConfigured() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupNameAndGroupIdConfigured() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-names = group1, group2",
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.office.scopes = "
+ "https:
"azure.activedirectory.authorization-clients.arm.scopes = "
+ "https:
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
int resourceServerCountInAuthCode = resourceServerCount(azure.getScopes());
assertTrue(resourceServerCountInAuthCode > 1);
int resourceServerCountInAccessToken =
resourceServerCount(repository.getAzureClientAccessTokenScopes());
assertTrue(resourceServerCountInAccessToken != 0);
});
}
@Disabled
@Test
public void noConfigurationOnMissingRequiredProperties() {
oauthClientRunner()
.run(context -> {
assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class);
assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class);
assertThat(context).doesNotHaveBean(OAuth2UserService.class);
});
}
@Test
public void resourceServerCountTest() {
Set<String> scopes = new HashSet<>();
assertEquals(resourceServerCount(scopes), 0);
scopes.add("openid");
scopes.add("profile");
scopes.add("offline_access");
assertEquals(resourceServerCount(scopes), 0);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} |
inferred will not be AADApplicationType.WEB_APPLICATION_AND_RESOURCE_SERVER | private boolean isValidApplicationType(AADApplicationType configured, AADApplicationType inferred) {
return inferred == configured || inferred == AADApplicationType.RESOURCE_SERVER_WITH_OBO;
} | return inferred == configured || inferred == AADApplicationType.RESOURCE_SERVER_WITH_OBO; | private boolean isValidApplicationType(AADApplicationType configured, AADApplicationType inferred) {
return inferred == configured || inferred == AADApplicationType.RESOURCE_SERVER_WITH_OBO;
} | class UserGroupProperties {
private final Log logger = LogFactory.getLog(UserGroupProperties.class);
/**
* Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph
* API Call.
*/
private List<String> allowedGroupNames = new ArrayList<>();
private Set<String> allowedGroupIds = new HashSet<>();
/**
* enableFullList is used to control whether to list all group id, default is false
*/
private Boolean enableFullList = false;
public Set<String> getAllowedGroupIds() {
return allowedGroupIds;
}
/**
* Set the allowed group ids.
*
* @param allowedGroupIds Allowed group ids.
*/
public void setAllowedGroupIds(Set<String> allowedGroupIds) {
this.allowedGroupIds = allowedGroupIds;
}
public List<String> getAllowedGroupNames() {
return allowedGroupNames;
}
public void setAllowedGroupNames(List<String> allowedGroupNames) {
this.allowedGroupNames = allowedGroupNames;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "enable-full-list is not easy to understand.",
replacement = "allowed-group-ids: all")
public Boolean getEnableFullList() {
return enableFullList;
}
@Deprecated
public void setEnableFullList(Boolean enableFullList) {
logger.warn(" 'azure.activedirectory.user-group.enable-full-list' property detected! "
+ "Use 'azure.activedirectory.user-group.allowed-group-ids: all' instead!");
this.enableFullList = enableFullList;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "In order to distinguish between allowed-group-ids and allowed-group-names, set allowed-groups "
+ "deprecated.",
replacement = "azure.activedirectory.user-group.allowed-group-names")
public List<String> getAllowedGroups() {
return allowedGroupNames;
}
@Deprecated
public void setAllowedGroups(List<String> allowedGroups) {
logger.warn(" 'azure.activedirectory.user-group.allowed-groups' property detected! " + " Use 'azure"
+ ".activedirectory.user-group.allowed-group-names' instead!");
this.allowedGroupNames = allowedGroups;
}
} | class UserGroupProperties {
private final Log logger = LogFactory.getLog(UserGroupProperties.class);
/**
* Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph
* API Call.
*/
private List<String> allowedGroupNames = new ArrayList<>();
private Set<String> allowedGroupIds = new HashSet<>();
/**
* enableFullList is used to control whether to list all group id, default is false
*/
private Boolean enableFullList = false;
public Set<String> getAllowedGroupIds() {
return allowedGroupIds;
}
/**
* Set the allowed group ids.
*
* @param allowedGroupIds Allowed group ids.
*/
public void setAllowedGroupIds(Set<String> allowedGroupIds) {
this.allowedGroupIds = allowedGroupIds;
}
public List<String> getAllowedGroupNames() {
return allowedGroupNames;
}
public void setAllowedGroupNames(List<String> allowedGroupNames) {
this.allowedGroupNames = allowedGroupNames;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "enable-full-list is not easy to understand.",
replacement = "allowed-group-ids: all")
public Boolean getEnableFullList() {
return enableFullList;
}
@Deprecated
public void setEnableFullList(Boolean enableFullList) {
logger.warn(" 'azure.activedirectory.user-group.enable-full-list' property detected! "
+ "Use 'azure.activedirectory.user-group.allowed-group-ids: all' instead!");
this.enableFullList = enableFullList;
}
@Deprecated
@DeprecatedConfigurationProperty(
reason = "In order to distinguish between allowed-group-ids and allowed-group-names, set allowed-groups "
+ "deprecated.",
replacement = "azure.activedirectory.user-group.allowed-group-names")
public List<String> getAllowedGroups() {
return allowedGroupNames;
}
@Deprecated
public void setAllowedGroups(List<String> allowedGroups) {
logger.warn(" 'azure.activedirectory.user-group.allowed-groups' property detected! " + " Use 'azure"
+ ".activedirectory.user-group.allowed-group-names' instead!");
this.allowedGroupNames = allowedGroups;
}
} |
I add the on-demand filter, the on-demand of the graph client is true, if we should remove the filter, then I will revert the change. | public void onDemandGraphClientConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AUTHORIZATION_CODE.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Arrays.asList(azure), clients);
});
} | assertEquals(Arrays.asList(azure), clients); | public void onDemandGraphClientConfiguredTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AUTHORIZATION_CODE.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Arrays.asList(graph, azure), clients);
});
} | class AADClientRegistrationRepositoryTest {
@Test
public void noClientsConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(1, clients.size());
assertEquals(azure, clients.get(0));
});
}
@Test
public void azureClientConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.azure.scopes = Azure.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
public void graphClientConfiguredTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AZURE_DELEGATED.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Collections.singletonList("Graph.Scope")), graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
@Test
public void clientWithClientCredentialsPermissions() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = fakeValue:/.default",
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials"
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
assertEquals(repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID).getAuthorizationGrantType(),
AuthorizationGrantType.AUTHORIZATION_CODE);
assertEquals(repository.findByRegistrationId("graph").getAuthorizationGrantType(),
AuthorizationGrantType.CLIENT_CREDENTIALS);
});
}
@Test
public void clientWhichIsNotAuthorizationCodeButOnDemandExceptionTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context ->
assertThrows(IllegalStateException.class, () -> context.getBean(AADAuthenticationProperties.class))
);
}
@Test
public void azureClientEndpointTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"https:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/", azure.getRedirectUri());
});
}
@Test
public void customizeUriTest() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.base-uri = http:
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"http:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
});
}
@Test
public void testNoGroupIdAndGroupNameConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
});
}
@Test
public void testGroupNameConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues("azure.activedirectory.user-group.allowed-group-names = group1, group2")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupIdConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupNameAndGroupIdConfigured() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-names = group1, group2",
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.authorization-clients.office.scopes = "
+ "https:
"azure.activedirectory.authorization-clients.arm.scopes = "
+ "https:
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
int resourceServerCountInAuthCode = resourceServerCount(azure.getScopes());
assertTrue(resourceServerCountInAuthCode > 1);
int resourceServerCountInAccessToken =
resourceServerCount(repository.getAzureClientAccessTokenScopes());
assertTrue(resourceServerCountInAccessToken != 0);
});
}
@Disabled
@Test
public void noConfigurationOnMissingRequiredProperties() {
WebApplicationContextRunnerUtils
.getContextRunner()
.run(context -> {
assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class);
assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class);
assertThat(context).doesNotHaveBean(OAuth2UserService.class);
});
}
@Test
public void resourceServerCountTest() {
Set<String> scopes = new HashSet<>();
assertEquals(resourceServerCount(scopes), 0);
scopes.add("openid");
scopes.add("profile");
scopes.add("offline_access");
assertEquals(resourceServerCount(scopes), 0);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} | class AADClientRegistrationRepositoryTest {
@Test
public void noClientsConfiguredTest() {
webApplicationContextRunner()
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(1, clients.size());
assertEquals(azure, clients.get(0));
});
}
@Test
public void azureClientConfiguredTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.azure.scopes = Azure.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Azure.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
public void graphClientConfiguredTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = Graph.Scope"
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")),
repository.getAzureClientAccessTokenScopes());
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(AUTHORIZATION_CODE.getValue(), azure.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Arrays.asList("Graph.Scope", "openid", "profile", "offline_access")),
azure.getScopes());
ClientRegistration graph = repository.findByRegistrationId("graph");
assertEquals(AZURE_DELEGATED.getValue(), graph.getAuthorizationGrantType().getValue());
assertEquals(new HashSet<>(Collections.singletonList("Graph.Scope")), graph.getScopes());
List<ClientRegistration> clients = collectClients(repository);
assertEquals(Collections.singletonList(azure), clients);
});
}
@Test
@Test
public void clientWithClientCredentialsPermissions() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.scopes = fakeValue:/.default",
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials"
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
assertEquals(repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID).getAuthorizationGrantType(),
AuthorizationGrantType.AUTHORIZATION_CODE);
assertEquals(repository.findByRegistrationId("graph").getAuthorizationGrantType(),
AuthorizationGrantType.CLIENT_CREDENTIALS);
});
}
@Test
public void clientWhichIsNotAuthorizationCodeButOnDemandExceptionTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.graph.authorizationGrantType = client_credentials",
"azure.activedirectory.authorization-clients.graph.on-demand = true"
)
.run(context ->
assertThrows(IllegalStateException.class, () -> context.getBean(AADAuthenticationProperties.class))
);
}
@Test
public void azureClientEndpointTest() {
webApplicationContextRunner()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
assertEquals("fake-client-id", azure.getClientId());
assertEquals("fake-client-secret", azure.getClientSecret());
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"https:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
assertEquals("{baseUrl}/login/oauth2/code/", azure.getRedirectUri());
});
}
@Test
public void customizeUriTest() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.base-uri = http:
)
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints(
"http:
assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri());
assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri());
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri());
});
}
@Test
public void testNoGroupIdAndGroupNameConfigured() {
webApplicationContextRunner()
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(new HashSet<>(Arrays.asList("openid", "profile", "offline_access")), azure.getScopes());
});
}
@Test
public void testGroupNameConfigured() {
webApplicationContextRunner()
.withPropertyValues("azure.activedirectory.user-group.allowed-group-names = group1, group2")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupIdConfigured() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void testGroupNameAndGroupIdConfigured() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-names = group1, group2",
"azure.activedirectory.user-group.allowed-group-ids = 7c3a5d22-9093-42d7-b2eb-e72d06bf3718")
.run(context -> {
ClientRegistrationRepository repository = context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertEquals(
new HashSet<>(Arrays.asList(
"openid", "profile", "offline_access", "https:
azure.getScopes());
});
}
@Test
public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() {
webApplicationContextRunner()
.withPropertyValues(
"azure.activedirectory.authorization-clients.office.scopes = "
+ "https:
"azure.activedirectory.authorization-clients.arm.scopes = "
+ "https:
)
.run(context -> {
AADClientRegistrationRepository repository =
(AADClientRegistrationRepository) context.getBean(ClientRegistrationRepository.class);
ClientRegistration azure = repository.findByRegistrationId(AZURE_CLIENT_REGISTRATION_ID);
assertNotNull(azure);
int resourceServerCountInAuthCode = resourceServerCount(azure.getScopes());
assertTrue(resourceServerCountInAuthCode > 1);
int resourceServerCountInAccessToken =
resourceServerCount(repository.getAzureClientAccessTokenScopes());
assertTrue(resourceServerCountInAccessToken != 0);
});
}
@Disabled
@Test
public void noConfigurationOnMissingRequiredProperties() {
oauthClientRunner()
.run(context -> {
assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class);
assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class);
assertThat(context).doesNotHaveBean(OAuth2UserService.class);
});
}
@Test
public void resourceServerCountTest() {
Set<String> scopes = new HashSet<>();
assertEquals(resourceServerCount(scopes), 0);
scopes.add("openid");
scopes.add("profile");
scopes.add("offline_access");
assertEquals(resourceServerCount(scopes), 0);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 1);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
scopes.add("https:
assertEquals(resourceServerCount(scopes), 2);
}
private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) {
List<ClientRegistration> result = new ArrayList<>();
itr.forEach(result::add);
return result;
}
} |
could we use more elaborate variable names? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | com.azure.storage.common.ParallelTransferOptions pOptions = | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// NOTE(review): body of openInputStream(BlobInputStreamOptions); the signature line sits
// just above this chunk.
options = options == null ? new BlobInputStreamOptions() : options;
// Defaults: ETAG consistency, fresh request conditions, full blob range, 4 MB chunks.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
// Download function invoked per chunk; no retry options or MD5 validation here.
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
// T3 carries the raw first-chunk download response.
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
// Pin subsequent chunk reads to a consistent view of the blob, per the chosen mode.
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Only pin the ETag when the caller did not already supply an if-match condition.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Re-target the client at the specific version unless one is already fixed.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
}
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
// The sync client is a thin blocking facade: every operation delegates to this async client.
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Wrap the encryption-scope-specific async client in a new synchronous facade.
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Wrap the CPK-specific async client in a new synchronous facade.
    BlobAsyncClientBase cpkAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Delegate to the wrapped async client.
    return this.client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Delegate to the wrapped async client.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Delegate to the wrapped async client.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Delegate to the wrapped async client.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Delegate to the wrapped async client.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Delegate to the wrapped async client.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Delegate to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; delegates to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Delegate to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Delegate to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Delegate to the wrapped async client.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Delegate to the wrapped async client.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Whole blob, no request conditions.
    return this.openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Adapt the positional parameters onto the options-based overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return this.openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // No timeout, empty context.
    return this.existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    Mono<Response<Boolean>> existsMono = this.client.existsWithResponse(context);
    return blockWithOptionalTimeout(existsMono, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Defaults: no metadata, tier, rehydrate priority, or request conditions.
    return this.beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Fold the positional parameters into the options type and delegate.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async copy poller and expose its synchronous view.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, empty context; response is intentionally discarded.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort call, honoring the optional timeout.
    Mono<Response<Void>> abortMono = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Defaults: no metadata, tier, request conditions, or timeout; empty context.
    return this.copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Fold the positional parameters into the options type and delegate.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy call, honoring the optional timeout.
    Mono<Response<String>> copyMono = this.client.copyFromUrlWithResponse(options, context);
    return blockWithOptionalTimeout(copyMono, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backwards compatibility; downloadStream is the preferred entry point.
    this.downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options or conditions, no range MD5, no timeout, empty context.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Block on the async content download with no timeout.
    return blockWithOptionalTimeout(this.client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backwards compatibility; downloadStreamWithResponse is the preferred entry point.
    return this.downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Write each emitted buffer into the caller's stream, then surface the response metadata.
    Mono<BlobDownloadResponse> downloadMono = this.client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(asyncResponse -> asyncResponse.getValue().reduce(stream, (os, buf) -> {
            try {
                os.write(FluxUtil.byteBufferToArray(buf));
                return os;
            } catch (IOException ex) {
                // Rethrow as unchecked so the reactive pipeline terminates with the I/O failure.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(asyncResponse)));
    return blockWithOptionalTimeout(downloadMono, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Stream the whole blob, collect it into BinaryData, and repackage as a content response.
    Mono<BlobDownloadContentResponse> downloadMono = this.client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(streamResponse -> BinaryData.fromFlux(streamResponse.getValue())
            .map(collected -> new BlobDownloadContentAsyncResponse(
                streamResponse.getRequest(), streamResponse.getStatusCode(),
                streamResponse.getHeaders(), collected,
                streamResponse.getDeserializedHeaders())))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(downloadMono, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default: fail if the destination file already exists.
    return this.downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // When overwriting, open with create/truncate/read/write; a null option set keeps the
    // default behavior of failing if the file already exists.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        openOptions = new HashSet<>(4);
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.WRITE);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    }
    return this.downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null,
        Context.NONE).getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // No explicit OpenOptions: delegate with null so default file-creation semantics apply.
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (fill defaults, convert to the common type) before delegating.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return this.downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file must not already exist; a {@link FileAlreadyExistsException} is thrown if it does.
 * Provide {@link OpenOption OpenOptions} in the options to override that behavior.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async download, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots, use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot options, no conditions, no timeout: plain delete.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased; to
 * preserve existing values they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased; to
 * preserve existing values they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata replaces existing metadata entirely; to preserve old values,
 * download them first and include them in the call.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata replaces existing metadata entirely; to preserve old values,
 * download them first and include them in the call.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    Response<Map<String, String>> response = this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags replace existing tags entirely; to preserve old values, download
 * them first and include them in the call.
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags replace existing tags entirely; to preserve old values, download
 * them first and include them in the call.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} used to interact with the created snapshot; use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier of the snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before handing it back.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue()))),
        timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into the options object and delegate.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; no service call is made here.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; no service call is made here.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; no service call is made here.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local; no service call is made here.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param expression The query expression.
 * @return An {@code InputStream} object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an {@code InputStream} object
 * that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block until the service responds, then expose the async body through a blocking InputStream.
    BlobQueryAsyncResponse queryResponse = client.queryWithResponse(queryOptions).block();
    if (queryResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return new ResponseBase<>(queryResponse.getRequest(), queryResponse.getStatusCode(),
        queryResponse.getHeaders(), new FluxInputStream(queryResponse.getValue()),
        queryResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    this.queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}; must be non-null and carry a non-null
 * output stream.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // reduce drains the body buffers sequentially into the caller's stream as a side effect;
        // the accumulator is the stream itself so writes stay ordered.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures through the reactive pipeline as unchecked exceptions.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response carries Void; the previous trailing getValue() was a misleading no-op and is dropped.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Class-scoped logger used to record and rethrow failures surfaced by blocking calls.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation in this class blocks on its reactive pipelines.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that this synchronous client delegates all operations to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return this.client.getAccountUrl();
}

/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}

/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}

/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}

/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    return this.client.getContainerClientBuilder().buildClient();
}

/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}

/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}

/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}

/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return this.client.getEncryptionScope();
}

/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}

/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}

/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return this.client.getVersionId();
}

/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // No range and no access conditions: stream the whole blob.
    return openInputStream(null, null);
}

/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Start the async existence check, then block for at most the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Use service defaults for metadata, tier, priority and both sets of request conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options-bag overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Adapt the async poller returned by the underlying client to a synchronous poller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease requirement, no timeout, default context.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options-bag overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Start the async copy, then block for at most the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backwards compatibility; downloadStream is the preferred entry point.
    this.downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Whole blob, default retry options, no access conditions, no range MD5.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> download = client.downloadContent();
    // This overload takes no caller-supplied timeout.
    return blockWithOptionalTimeout(download, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backwards compatibility; delegates directly to downloadStreamWithResponse.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
// Build an async pipeline that writes every downloaded buffer into the caller's stream and
// surfaces only the response metadata once the body has been fully consumed.
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
// reduce(...) folds the body Flux into the OutputStream sequentially, so writes are ordered.
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap as unchecked so the I/O failure terminates the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
// Block synchronously, honoring the optional timeout.
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
// Download the full blob (range null, no range MD5) and buffer the body into BinaryData.
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
// Collect the streaming body into memory, then rebuild the response around the buffered data.
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
// Block synchronously, honoring the optional timeout.
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior refuses to overwrite an existing file.
    return this.downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> fileOpenOptions = null;
    if (overwrite) {
        // Replace any existing file: create if absent, truncate, then open for read/write.
        fileOpenOptions = new HashSet<>();
        fileOpenOptions.add(StandardOpenOption.CREATE);
        fileOpenOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        fileOpenOptions.add(StandardOpenOption.READ);
        fileOpenOptions.add(StandardOpenOption.WRITE);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        fileOpenOptions, null, Context.NONE);
    return response.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with default open options (create-new file semantics).
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options before handing off to the options-bag overload.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async transfer, then block for at most the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete only this blob (or snapshot), with no snapshot option and no access conditions.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a>.</p>
     *
     * @return The blob properties and metadata.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties getProperties() {
        // Delegate with no request conditions and no timeout; unwrap the response value.
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a>.</p>
     *
     * @param headers {@link BlobHttpHeaders}
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Delegate with no request conditions and no timeout.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata">Azure Docs</a>.</p>
     *
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setMetadata(Map<String, String> metadata) {
        // Delegate with no request conditions and no timeout.
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's tags.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-tags">Azure Docs</a>.</p>
     *
     * @return The blob's tags.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Map<String, String> getTags() {
        // Delegate with default options and no timeout; unwrap the response value.
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tags">Azure Docs</a>.</p>
     *
     * @param tags Tags to associate with the blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setTags(Map<String, String> tags) {
        // Delegate with no timeout; the response is intentionally discarded.
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/snapshot-blob">Azure Docs</a>.</p>
     *
     * @return A {@link BlobClientBase} which is used to interact with the created snapshot.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobClientBase createSnapshot() {
        // Delegate with no metadata, no request conditions and no timeout; unwrap the response value.
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a>.</p>
     *
     * @param tier The new tier for the blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setAccessTier(AccessTier tier) {
        // Delegate with no rehydrate priority, no lease and no timeout.
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a>.</p>
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void undelete() {
        // Delegate with no timeout.
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a>.</p>
     *
     * @return The sku name and account kind.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public StorageAccountInfo getAccountInfo() {
        // Delegate with no timeout; unwrap the response value.
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. See the
     * {@code BlobServiceClient} user delegation key APIs for how to obtain one.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        // Signing is purely local; delegate to the async client's shared implementation.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. See the
     * {@code BlobServiceClient} user delegation key APIs for how to obtain one.
     * @param accountName The account name.
     * @param context Additional context that is passed through the code when generating a SAS.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        // Signing is purely local; delegate to the async client's shared implementation.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        // Signing is purely local; delegate to the async client's shared implementation.
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param context Additional context that is passed through the code when generating a SAS.
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        // Signing is purely local; delegate to the async client's shared implementation.
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
    /**
     * Opens a blob input stream to query the blob.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
     *
     * @param expression The query expression.
     * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public InputStream openQueryInputStream(String expression) {
        // Delegate to the response variant with default options; unwrap the stream.
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param expression The query expression.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void query(OutputStream stream, String expression) {
        // Delegate to the response variant with no timeout.
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
    /**
     * Queries an entire blob into an output stream, returning the raw HTTP response.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
     *
     * @param queryOptions {@link BlobQueryOptions The query options}; must be non-null with a non-null output stream.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code queryOptions} or its output stream is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Stream each body buffer into the caller's OutputStream as it arrives, then surface the response metadata.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Propagate the IOException through the reactive pipeline as an unchecked error.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
    /**
     * Sets the immutability policy on a blob, blob snapshot or blob version.
     * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
     * immutable storage with versioning enabled to call this API.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy}
     *
     * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
     * @return The immutability policy.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
        // Delegate with no request conditions and no timeout; unwrap the response value.
        return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets a legal hold on the blob.
     * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
     * immutable storage with versioning enabled to call this API.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold}
     *
     * @param legalHold Whether or not you want a legal hold on the blob.
     * @return The legal hold result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobLegalHoldResult setLegalHold(boolean legalHold) {
        // Delegate with no timeout; unwrap the response value.
        return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
    }
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} |
what's r ? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | (r, conditions) -> client.downloadWithResponse(r, null, conditions, false); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Resolve defaults: callers may pass a null options bag or leave individual settings unset.
options = options == null ? new BlobInputStreamOptions() : options;
// ETAG is the default consistent-read mode for streaming reads.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
    ? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
    ? new BlobRequestConditions() : options.getRequestConditions();
// Default range covers the whole blob (offset 0, unbounded count).
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default chunk size is 4 MB per service round trip.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
    new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
// Per-chunk download function invoked lazily by ChunkedDownloadUtils for each range.
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
    (chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
// Eagerly fetch and buffer the first chunk so the returned stream can serve reads immediately
// and so the blob's properties (ETag/version id) are known up front.
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
    .flatMap(tuple3 -> {
        BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
        // Collect the first chunk's body into one ByteBuffer, carrying the response alongside it.
        return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
            .map(ByteBuffer::wrap)
            .zipWith(Mono.just(downloadResponse));
    })
    .flatMap(tuple2 -> {
        ByteBuffer initialBuffer = tuple2.getT1();
        BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
        BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
        String eTag = properties.getETag();
        String versionId = properties.getVersionId();
        BlobAsyncClientBase client = this.client;
        // Pin subsequent chunk reads to a consistent view of the blob, per the chosen mode.
        switch (consistentReadControl) {
            case NONE:
                break;
            case ETAG:
                // Lock onto the ETag observed on the first chunk unless the caller supplied an If-Match.
                if (requestConditions.getIfMatch() == null) {
                    requestConditions.setIfMatch(eTag);
                }
                break;
            case VERSION_ID:
                if (versionId == null) {
                    // Service returned no version id, so the account has versioning disabled.
                    return FluxUtil.monoError(logger,
                        new UnsupportedOperationException("Versioning is not supported on this account."));
                } else {
                    if (this.client.getVersionId() == null) {
                        // Re-scope the client to the version observed on the first chunk.
                        client = this.client.getVersionClient(versionId);
                    }
                }
                break;
            default:
                return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
                    + "supported."));
        }
        return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
            requestConditions, properties));
    }).block();
} | class BlobClientBase {
// Logger used to record exceptions before they are thrown to the caller.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that performs all the underlying service calls
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Returns a client that targets a specific {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}

/**
 * Returns a client that targets a specific {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}

/**
 * Returns a client configured with the given {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}

/**
 * Returns a client configured with the given {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Gets the URL of the storage account that contains this blob.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return this.client.getAccountUrl();
}

/**
 * Gets the full URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}

/**
 * Gets the name of the storage account associated with this resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}

/**
 * Gets the name of the container that holds this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}

/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    return this.client.getContainerClientBuilder().buildClient();
}

/**
 * Gets the decoded name of this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}

/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}

/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}

/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return this.client.getEncryptionScope();
}

/**
 * Gets the service version this client targets.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}

/**
 * Gets the snapshot identifier this client targets, if any.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}

/**
 * Gets the version identifier this client targets, if any.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return this.client.getVersionId();
}

/**
 * Indicates whether this client targets a snapshot rather than the base blob.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the entire blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    return openInputStream(null, null);
}

/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Determines whether the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}

/**
 * Determines whether the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegate to the full overload with every optional argument left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual arguments into the options overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Bridge the async poller to a synchronous one.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abort = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual arguments into the options overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(this.client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    downloadStream(stream);
}

/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full-blob download: no range, no retry options, no conditions, no MD5 check.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStream(OutputStream)} to download larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> download = this.client.downloadContent();
    // No timeout is applied for this convenience overload.
    return blockWithOptionalTimeout(download, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
 * BlobRequestConditions, boolean, Duration, Context)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce over the response body writes each emitted buffer to the caller's stream in order,
    // then the response (headers/status) is surfaced once the body has been fully consumed.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the Reactor pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange,
 * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Buffer the whole body (null range = entire blob) into a BinaryData, then rewrap the
    // streaming response as a content response carrying the same request/status/headers.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default is overwrite=false so an existing file is never clobbered.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    final Set<OpenOption> openOptions;
    if (overwrite) {
        // Allow replacing an existing file: create if absent, truncate if present.
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    } else {
        // null lets the downstream overload apply its default (create-new) behavior.
        openOptions = null;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null open options, keeping the default create-new file semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common representation.
    com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate with default options: no snapshot-delete option, no access conditions,
    // no timeout, and an empty context. The response is intentionally discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Use the WithResponse overload with default conditions, then unwrap the payload.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate with no request conditions, no timeout, and an empty context.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate with no request conditions, no timeout, and an empty context.
    // Note: the supplied metadata fully replaces any existing metadata.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Use the WithResponse overload with default options, then unwrap the tag map.
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Delegate with no timeout and an empty context.
    // Note: the supplied tags fully replace any existing tags.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Use the WithResponse overload with default options, then unwrap the snapshot client.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // The async client returns a snapshot-scoped async client; wrap it in a sync
    // BlobClientBase before handing it back to the caller.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate with default rehydrate priority, no lease requirement, no timeout,
    // and an empty context.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Repackage the individual parameters into the options-bag overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate with no timeout and an empty context; the response is discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Use the WithResponse overload with default options, then unwrap the payload.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is computed locally (no service call); the async client holds the
    // shared implementation, so delegate directly.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is computed locally (no service call); delegate to the async client's
    // shared implementation with the explicit account name and context.
    return client.generateUserDelegationSas(
        blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Requires StorageSharedKeyCredential authentication; the signature is computed
    // locally by the async client's shared implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Requires StorageSharedKeyCredential authentication; computed locally, the context
    // is only threaded through the SAS-generation code path.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Use the WithResponse overload with default options, then unwrap the stream.
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block for the async query response; the body is then exposed lazily through a
    // FluxInputStream so the caller reads query results on demand.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Delegate to the options-based overload with no timeout and an empty context;
    // the output stream is null-checked there.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Fail fast on missing options or output stream before touching the service.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async body (a stream of ByteBuffers) into the caller's OutputStream as a
    // side effect, then surface only the response metadata as a BlobQueryResponse.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Use the WithResponse overload with default conditions, then unwrap the policy.
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Delegate with no timeout and an empty context. The response body is Response<Void>,
    // so the previous trailing getValue() call was a no-op and has been removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Use the WithResponse overload with no timeout, then unwrap the result.
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} bound to the given {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} bound to the given {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} that uses the given {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} that uses the given {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase keyedAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(keyedAsyncClient);
}
/**
 * Returns the URL of the storage account this blob belongs to.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return client.getAccountUrl();
}
/**
 * Returns the full URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return client.getBlobUrl();
}
/**
 * Returns the name of the storage account associated with this resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return client.getAccountName();
}
/**
 * Returns the name of the container holding this blob.
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return client.getContainerName();
}
/**
 * Builds a client pointing at the container this blob lives in.
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Reuse the async client's pre-configured container builder so pipeline settings carry over.
    return client.getContainerClientBuilder().buildClient();
}
/**
 * Returns the decoded name of this blob.
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return client.getBlobName();
}
/**
 * Returns the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return client.getHttpPipeline();
}
/**
 * Returns the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return client.getCustomerProvidedKey();
}
/**
 * Returns the {@code encryption scope} used to encrypt this blob's content on the server.
 * Package-private: exposed for use by sibling clients within this package.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return client.getEncryptionScope();
}
/**
 * Returns the service version this client targets.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return client.getServiceVersion();
}
/**
 * Returns the snapshot identifier of this blob resource, if any.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return client.getSnapshotId();
}
/**
 * Returns the version identifier of this blob resource, if any.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return client.getVersionId();
}
/**
 * Indicates whether this client targets a blob snapshot rather than the base blob.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the entire blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Null range and null conditions mean "whole blob, unconditionally".
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Checks whether the blob this client represents exists in the cloud.
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Checks whether the blob this client represents exists in the cloud.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, it must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination is also applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Defaults for metadata, tier, priority and both sets of request conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, it must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination is also applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Fold the loose parameters into an options bag and dispatch to the options-based overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, it must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination is also applied to the
 * source. The source URL must be URL encoded.
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Adapt the async PollerFlux into a blocking SyncPoller for this sync client.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, empty pipeline context.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
    Mono<Response<Void>> abortMono = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
    // Fold the loose parameters into an options bag and dispatch to the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept as a thin alias for backwards compatibility.
    downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options, no conditions, no MD5 check, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob into memory. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method supports downloads up to 2GB of data. Use a streaming download for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // No timeout: block until the download completes.
    return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method will be deprecated in the future. Use
 * {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions, boolean,
 * Duration, Context)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
    // Kept as a thin alias for backwards compatibility.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
// Reduce the async body flux into the caller's stream: each buffer is copied to a byte[] and
// written sequentially, so writes happen in order on the reactive pipeline's thread.
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap as unchecked and propagate through the reactive chain; surfaces from block() below.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into memory. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method supports downloads up to 2GB of data. Use the stream-based overload to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
// Null range + false MD5 flag: fetch the whole blob without per-range checksum.
// The body flux is buffered into a single BinaryData, then the streaming response is
// re-wrapped as a content response preserving request, status, headers, and typed headers.
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: never overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptionSet = null;
    if (overwrite) {
        // Replace-existing semantics: create if absent, truncate if present, allow read+write.
        openOptionSet = new HashSet<>();
        openOptionSet.add(StandardOpenOption.CREATE);
        openOptionSet.add(StandardOpenOption.WRITE);
        openOptionSet.add(StandardOpenOption.READ);
        openOptionSet.add(StandardOpenOption.TRUNCATE_EXISTING);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        openOptionSet, null, Context.NONE);
    return response.getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null open options (default: fail if the file exists).
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common type expected by the options bag.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, use the
 * {@code deleteWithResponse} overload and set {@code DeleteSnapshotsOptionType} to INCLUDE.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot option, no conditions, no timeout, empty pipeline context.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<Response<Void>> deleteMono =
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(deleteMono, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
    /**
     * Queries an entire blob into an output stream.
     *
     * @param queryOptions {@link BlobQueryOptions The query options}. Must be non-null and carry a non-null
     * output stream.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        // Validate up front; both the options bag and its output stream are required.
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce the reactive ByteBuffer stream into the caller-supplied OutputStream, then surface the
        // response metadata once the body has been fully written.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Propagate I/O failures out of the reactive pipeline as an unchecked exception.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} |
how can this happen? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file at the given path, with full control over range, transfer
 * tuning, retries, request conditions and how the destination file is opened.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options into the common-module form expected by the options bag.
    com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async download, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, use
 * the {@code deleteWithResponse} overload and set {@code DeleteSnapshotsOptionType} to INCLUDE.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate with no snapshot option, no request conditions and no timeout.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async property fetch, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate with no request conditions and no timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate with no request conditions and no timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async tag fetch, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    BlobSetTagsOptions options = new BlobSetTagsOptions(tags);
    this.setTagsWithResponse(options, null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async client returned by the snapshot call in a synchronous BlobClientBase before blocking.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue()))),
        timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate with no priority, lease id or timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Fold the individual arguments into the options-bag overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate with no timeout.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: the client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: the client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local; delegate to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; Mono.block() can yield null if the pipeline completed empty.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the reactive body as a blocking InputStream for the caller.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>Blocks until the full query result has been written to the {@link OutputStream} configured on
 * {@code queryOptions}, or until {@code timeout} elapses.</p>
 *
 * @param queryOptions {@link BlobQueryOptions The query options}. Must be non-null and carry a non-null
 * output stream.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // Drain the async response body into the caller's OutputStream one buffer at a time,
        // then surface the original response metadata once the body is fully written.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response carries Void, so there is nothing to extract; the previous trailing
    // .getValue() call was redundant and inconsistent with sibling void wrappers (e.g. undelete()).
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
 * Sets a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Block on the async call, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Logger used when surfacing exceptions thrown by this client.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous operation on this client blocks on its reactive counterpart.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that all synchronous operations on this client delegate to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block, raising if the optional timeout elapses first.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Unwrap the value from the response variant, using default conditions and no timeout.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate to the response variant; the returned response is intentionally discarded.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async header update and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate to the response variant with default conditions; the response is discarded.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async metadata update and block with the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Unwrap the tag map from the response variant, using default options and no timeout.
    return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Delegate to the response variant; the returned response is intentionally discarded.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Issue the async tag update and block with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Unwrap the snapshot client from the response variant, using all-default arguments.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, then wrap the returned async client in a
    // synchronous BlobClientBase while preserving the response metadata.
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate to the response variant with default priority/lease; the response is discarded.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into an options bag and forward to the options overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Issue the async tier change and block with the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate to the response variant; the returned response is intentionally discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Issue the async undelete and block with the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Unwrap the account info from the response variant, with no timeout.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch account info through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is a local signing operation; delegate straight to the async client.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is a local signing operation; delegate straight to the async client.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is a local signing operation; delegate straight to the async client.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is a local signing operation; delegate straight to the async client.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Wrap the raw expression in an options bag and unwrap the stream from the response variant.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Issue the query through the async client and wait for the response headers.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the streaming body as a blocking InputStream while preserving the response metadata.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Bundle expression and destination stream into options; the response is discarded.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Both the options bag and its destination stream are mandatory for a blocking query.
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// Reduce the async body into the caller-supplied OutputStream chunk by chunk; an
// IOException during the write is wrapped unchecked and propagated through the pipeline.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
// Block for completion, raising if the optional timeout elapses first.
return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Unwrap the applied policy from the response variant, using default conditions.
    return this.setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async policy update and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Delegate to the response variant; the Response<Void> is discarded. The previous
    // trailing .getValue() on a Response<Void> was a no-op and is removed for
    // consistency with the other void wrappers in this class (delete(), undelete()).
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Issue the async policy deletion and block with the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Unwrap the legal-hold result from the response variant, with no timeout.
    return this.setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Issue the async legal-hold update and block with the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
how can this happen? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Normalize unset options to their defaults; ETAG consistent read control is the default.
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Each chunk download defaults to 4 MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
// Function invoked to fetch each subsequent chunk of the blob on demand.
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
// Buffer the first chunk in memory so the returned stream can serve initial reads from it.
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Lock later reads to the ETag observed on the first chunk unless the caller supplied one.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Pin later reads to the version observed on the first chunk when no version was requested.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
}

class BlobClientBase {
// Logger used when surfacing exceptions raised while mapping or blocking on async results.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client this synchronous client wraps and delegates to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} scoped to the given {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} that uses the given {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Get the url of the storage account. Delegates to the underlying async client.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
 * Gets the URL of the blob represented by this client. Delegates to the underlying async client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
 * Get associated account name. Delegates to the underlying async client.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
return client.getAccountName();
}
/**
 * Get the container name. Delegates to the underlying async client.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
return client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
// Builds a new synchronous container client from the async client's container builder on each call.
return client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name. Delegates to the underlying async client.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
return client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client. Delegates to the underlying async client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption, or {@code null} if none is set.
 */
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 * Package-private; exposed for use within the library.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
 * Gets the service version the client is using. Delegates to the underlying async client.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob, or {@code null} if this is not a snapshot.
 */
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId, or {@code null} when no specific version is targeted.
 */
public String getVersionId() {
return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob. Uses default range (whole blob) and no request conditions.
 * <p>
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Package the legacy parameters into the options bag and defer to the options-based overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
// Delegates to the with-response overload using no timeout and an empty context.
return existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Forward to the full overload, defaulting every optional argument.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the individual parameters into the options bag consumed by the options-based overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
// Obtain the synchronous poller view of the async long-running copy operation.
return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
// Delegates with no lease, no timeout, and an empty context; the response is discarded.
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort, honoring the optional timeout.
    Mono<Response<Void>> abortResponse = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortResponse, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
// Delegates with all optional parameters defaulted and unwraps the copy ID from the response.
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the individual parameters into the options bag consumed by the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async synchronous-copy call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
// Kept as a thin alias for backwards compatibility; downloadStream is the preferred entry point.
downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
// Full blob (null range), no retry options, no conditions, no MD5, no timeout.
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStream(OutputStream)} to download larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
// No timeout is applied; blocks until the whole blob is buffered in memory.
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
 * BlobRequestConditions, boolean, Duration, Context)} instead.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
// Thin forwarding alias retained for backwards compatibility.
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
// Reduce the downloaded ByteBuffer flux into the caller's stream, writing each buffer as it arrives.
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap the checked IOException so it can propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange,
 * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
// Buffer the whole body into BinaryData, then re-wrap the response preserving status/headers.
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
// Non-overwriting by default; an existing file causes FileAlreadyExistsException.
return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // When overwriting, open with CREATE + TRUNCATE_EXISTING (plus READ/WRITE access);
    // otherwise pass null so the default create-new semantics apply.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
// Forwards with null open options, preserving the default create-new file behavior.
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Apply the blob-layer defaults to the transfer options, then wrap them in the common type.
    com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Collect all parameters into the options bag consumed by the options-based overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async download, honoring the optional caller-supplied timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot option, no access conditions, no timeout — plain delete.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Synchronously wait on the async delete, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Fetch with default conditions and unwrap the value from the full response.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async properties fetch with the optional timeout applied.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Apply headers with no access conditions and no timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Wait synchronously on the async header update, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Replace the blob's metadata with no access conditions and no timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async metadata update with the optional timeout applied.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Fetch with default options and unwrap the tag map from the response.
    Response<Map<String, String>> response = getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async tags fetch with the optional timeout applied.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Replace all tags with no timeout; response is intentionally discarded.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async tag update with the optional timeout applied.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Snapshot with no metadata or conditions; unwrap the snapshot client from the response.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, then wrap the returned async client in a
    // sync BlobClientBase while preserving the original response metadata.
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Set the tier with default priority, no lease, and no timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into an options bag and dispatch to the canonical overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Block on the async tier change with the optional timeout applied.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Restore the soft-deleted blob with no timeout; response is discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async undelete with the optional timeout applied.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Fetch with no timeout and unwrap the account info from the response.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async account-info fetch with the optional timeout applied.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local signing — delegate straight to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Purely local signing; forward everything to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Purely local signing with the shared-key credential; no service call is made.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Purely local signing; the context is only threaded through the SAS-generation code.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default query options and unwrap the stream from the response.
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Issue the query asynchronously and wait for the streaming response to begin.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    // block() returning null indicates the reactive pipeline completed empty — treat as a client error.
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the body Flux as a lazily-read InputStream while preserving request/headers/status.
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Build options carrying both the expression and the destination stream, then run synchronously.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Fail fast on missing options or a missing destination stream before touching the service.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the response body Flux into the caller's OutputStream. The stream itself is the
    // accumulator, so each emitted buffer is written as a side effect in emission order.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    // Block until the full body has been written, bounded by the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Apply with no conditions or timeout and unwrap the resulting policy.
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async policy update with the optional timeout applied.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Response<Void> carries no value, so the response is simply discarded
    // (the previous getValue() call on it was a no-op and has been removed,
    // matching the other void wrappers such as delete() and undelete()).
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Block on the async policy deletion with the optional timeout applied.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Apply with no timeout and unwrap the legal-hold result from the response.
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Block on the async legal-hold update with the optional timeout applied.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wraps the async client's snapshot view in a fresh synchronous client.
    return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    // Wraps the async client's version view in a fresh synchronous client.
    return new BlobClientBase(client.getVersionClient(versionId));
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Rebuilds the client around an async client configured with the requested scope.
    return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Rebuilds the client around an async client configured with the requested key.
    return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Pure delegation to the wrapped async client.
    return client.getAccountUrl();
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    // Pure delegation to the wrapped async client.
    return client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Builds a synchronous container client from the async client's container builder;
    // a new instance is created on every call.
    return client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    // Pure delegation to the wrapped async client.
    return client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 * <p>Package-private: used internally by the SDK rather than by callers.</p>
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    // Pure delegation to the wrapped async client.
    return client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 * <p>
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Full-blob download: no range restriction and no access conditions.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 * <p>
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Funnels into the options-based overload (presumably defined elsewhere in this class - not visible here).
    return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 *
 * <p>NOTE(review): this Javadoc is orphaned - the {@code openInputStream(BlobInputStreamOptions)} overload it
 * describes (and which the two-argument overload above calls) is missing at this point in the file. Restore the
 * method or remove this comment.</p>
 */
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegates to the WithResponse variant with no timeout and an empty context.
    return existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Defaults everything except the source URL and poll interval.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Assemble the options bag, then delegate to the options-based overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        // Translate the generic RequestConditions into blob-specific source conditions.
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Converts the async client's poller into a blocking SyncPoller for the synchronous API.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // Response is intentionally discarded; service errors surface as exceptions.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort, bounding the wait when a timeout is supplied.
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Delegates with default metadata/tier/conditions, no timeout, empty context; unwraps the copy ID.
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Assemble the options bag, then delegate to the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * <p>This method will be deprecated in the future. Use {@link
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Thin alias kept for source compatibility; per the note above, downloadStream is the preferred entry point.
    downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full download: no range, default retry options/conditions, no range-MD5 validation, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Blocks without a timeout; use downloadContentWithResponse for timeout control.
    return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * <p>This method will be deprecated in the future.
 * Use {@link
 * BlobRequestConditions, boolean, Duration, Context)} instead.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Thin alias kept for source compatibility; forwards everything to downloadStreamWithResponse.
    return downloadStreamWithResponse(stream, range,
        options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Fold the body Flux into the caller's OutputStream, one buffer at a time, then
        // surface the original response metadata once the stream is fully written.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap as unchecked so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link
 * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        // Collect the streamed body into in-memory BinaryData (hence the 2GB limit noted above),
        // preserving the original request/status/headers on the rebuilt response.
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: fail if the destination file already exists.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (overwrite) {
        // Allow replacing an existing file: create if absent, truncate otherwise.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // Null selects the default open options: the download fails if the file already exists.
        openOptions = null;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forwards to the openOptions-aware overload with null open options (create-new semantics, per the note above).
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (apply defaults, wrap into the common type) before building the options bag.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@link
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https:
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Deletes only this blob/snapshot (no snapshot option), with no access conditions or timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots, call
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} with
 * {@code DeleteSnapshotsOptionType} set to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Delegate to the full overload with no request conditions or timeout.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}

/**
 * Returns the blob's metadata and properties along with the raw service response.
 *
 * @param requestConditions {@link BlobRequestConditions} that must be satisfied for the request to proceed.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Build the async call, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased; to
 * preserve existing values they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate to the full overload with default conditions and no timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}

/**
 * Changes a blob's HTTP header properties and returns the raw service response. If only one HTTP header is
 * updated, the others will all be erased; to preserve existing values they must be passed alongside the header
 * being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions} that must be satisfied for the request to proceed.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Build the async operation first, then block with the optional timeout bound.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata, replacing any existing metadata. If old values must be preserved, they must be
 * downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob. Leading or trailing whitespace in any metadata key or value
 * must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate to the full overload with default conditions and no timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}

/**
 * Changes a blob's metadata, replacing any existing metadata, and returns the raw service response. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob. Leading or trailing whitespace in any metadata key or value
 * must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions} that must be satisfied for the request to proceed.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Build the async operation first, then block with the optional timeout bound.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Delegate to the options-based overload with default options and no timeout.
    return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}

/**
 * Returns the blob's tags along with the raw service response.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Build the async call, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags, replacing any existing tags on the blob. If old values must be preserved, they must be
 * downloaded and included in the call to this method.
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Delegate to the options-based overload with no timeout.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}

/**
 * Sets user defined tags, replacing any existing tags on the blob, and returns the raw service response. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Build the async call, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Delegate to the full overload with default metadata/conditions and no timeout.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}

/**
 * Creates a read-only snapshot of the blob and returns the raw service response.
 *
 * @param metadata Metadata to associate with the resource. Leading or trailing whitespace in any metadata key or
 * value must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions} that must be satisfied for the request to proceed.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async client returned by the snapshot call in a sync BlobClientBase before blocking.
    Mono<Response<BlobClientBase>> snapshot = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshot, timeout);
}
/**
 * Sets the tier on a blob. Allowed on a page blob in a premium storage account or a block blob in a blob storage
 * or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob; a
 * block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate to the full overload with no priority, lease, or timeout.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}

/**
 * Sets the tier on a blob and returns the raw service response. See {@link #setAccessTier(AccessTier)} for tier
 * semantics. This does not update the blob's etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Fold the individual parameters into the options type and delegate.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}

/**
 * Sets the tier on a blob and returns the raw service response. See {@link #setAccessTier(AccessTier)} for tier
 * semantics. This does not update the blob's etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Build the async operation first, then block with the optional timeout bound.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate to the full overload with no timeout.
    undeleteWithResponse(null, Context.NONE);
}

/**
 * Restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots, returning
 * the raw service response.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Build the async call, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Delegate to the full overload with no timeout.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}

/**
 * Returns the sku name and account kind for the account along with the raw service response.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Build the async call, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Signing is delegated entirely to the underlying async client.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}

/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Signing is delegated entirely to the underlying async client.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Signing is delegated entirely to the underlying async client.
    return client.generateSas(blobServiceSasSignatureValues);
}

/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Signing is delegated entirely to the underlying async client.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens an input stream over the results of a blob query.
 *
 * @param expression The query expression.
 * @return An {@code InputStream} object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Delegate to the options-based overload with default query options.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}

/**
 * Opens an input stream over the results of a blob query, along with the raw response details.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an {@code InputStream} object that
 * represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block for the async query response before wrapping its reactive body.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream for the caller.
    InputStream stream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), stream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Delegate to the options-based overload with no timeout.
    BlobQueryOptions options = new BlobQueryOptions(expression, stream);
    queryWithResponse(options, null, Context.NONE);
}
/**
 * Queries an entire blob, writing the query result into the {@link OutputStream} carried by {@code queryOptions},
 * and returns the raw service response once the whole body has been written.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}; must be non-null and must carry a non-null
 * output stream.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs while writing to the output stream.
 * @throws NullPointerException if {@code queryOptions} or its output stream is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the response body (a stream of ByteBuffers) into the caller's OutputStream; the response metadata is
    // only surfaced after the entire body has been consumed.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Rethrow as unchecked so the failure can travel through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Delegate to the full overload with default conditions and no timeout.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}

/**
 * Sets the immutability policy on a blob, blob snapshot or blob version, returning the raw service response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions} that must be satisfied for the request to proceed.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Build the async operation first, then block with the optional timeout bound.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response is Response<Void>, so there is no value to unwrap — the previous .getValue() was a no-op.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}

/**
 * Deletes the immutability policy on a blob, blob snapshot or blob version, returning the raw service response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Build the async operation first, then block with the optional timeout bound.
    Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Delegate to the full overload with no timeout.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}

/**
 * Sets a legal hold on the blob and returns the raw service response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Build the async call, then block (optionally bounded by the timeout).
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
Should `buildBlobPropertiesResponse` be moved to `ModelHelper` (or whatever type we have to dump model transformations) ? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
    // Normalize all optional inputs to their defaults before any network work.
    options = options == null ? new BlobInputStreamOptions() : options;
    ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
        ? ConsistentReadControl.ETAG : options.getConsistentReadControl();
    BlobRequestConditions requestConditions = options.getRequestConditions() == null
        ? new BlobRequestConditions() : options.getRequestConditions();
    BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
    int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
    com.azure.storage.common.ParallelTransferOptions pOptions =
        new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
    BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
        (r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
    // Synchronously fetch the first chunk; its response carries the blob's properties (etag, version).
    Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
        ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
    if (tuple == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
    }
    BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
    // Buffer the first chunk's body so the returned stream can serve it without re-downloading.
    ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
        .map(ByteBuffer::wrap).block();
    if (initialBuffer == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
    }
    BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
    String eTag = properties.getETag();
    String versionId = properties.getVersionId();
    BlobAsyncClientBase client = this.client;
    // Pin the view of the blob for subsequent chunk reads according to the chosen consistency mode.
    switch (consistentReadControl) {
        case NONE:
            break;
        case ETAG:
            // Lock subsequent reads to the etag observed on the first chunk, unless the caller set one.
            if (requestConditions.getIfMatch() == null) {
                requestConditions.setIfMatch(eTag);
            }
            break;
        case VERSION_ID:
            if (versionId == null) {
                throw logger.logExceptionAsError(
                    new UnsupportedOperationException("Versioning is not supported on this account."));
            } else {
                // Lock subsequent reads to the observed version, unless a version is already targeted.
                if (this.client.getVersionId() == null) {
                    client = this.client.getVersionClient(versionId);
                }
            }
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
                + "supported."));
    }
    return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
        requestConditions, properties);
} | BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue(); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
    // Normalize all optional inputs to their defaults before any network work.
    options = options == null ? new BlobInputStreamOptions() : options;
    ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
        ? ConsistentReadControl.ETAG : options.getConsistentReadControl();
    BlobRequestConditions requestConditions = options.getRequestConditions() == null
        ? new BlobRequestConditions() : options.getRequestConditions();
    BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
    int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
    com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
        new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
    BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
        (chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
    // Fetch the first chunk reactively; only the final composed Mono is blocked on.
    return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
        .flatMap(tuple3 -> {
            BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
            // Buffer the first chunk's body and pair it with its response metadata.
            return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
                .map(ByteBuffer::wrap)
                .zipWith(Mono.just(downloadResponse));
        })
        .flatMap(tuple2 -> {
            ByteBuffer initialBuffer = tuple2.getT1();
            BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
            BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
            String eTag = properties.getETag();
            String versionId = properties.getVersionId();
            BlobAsyncClientBase client = this.client;
            // Pin the view of the blob for subsequent chunk reads according to the chosen consistency mode.
            switch (consistentReadControl) {
                case NONE:
                    break;
                case ETAG:
                    // Lock subsequent reads to the etag observed on the first chunk, unless the caller set one.
                    if (requestConditions.getIfMatch() == null) {
                        requestConditions.setIfMatch(eTag);
                    }
                    break;
                case VERSION_ID:
                    if (versionId == null) {
                        return FluxUtil.monoError(logger,
                            new UnsupportedOperationException("Versioning is not supported on this account."));
                    } else {
                        // Lock subsequent reads to the observed version, unless a version is already targeted.
                        if (this.client.getVersionId() == null) {
                            client = this.client.getVersionClient(versionId);
                        }
                    }
                    break;
                default:
                    return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
                        + "supported."));
            }
            return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
                requestConditions, properties));
        }).block();
} | class BlobClientBase {
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Sync facade state: every operation delegates to this async client.
    private final BlobAsyncClientBase client;
    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client to delegate all operations to
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
    public BlobContainerClient getContainerClient() {
        // The async client supplies a pre-configured builder for the parent container;
        // building it here yields the synchronous container client.
        return client.getContainerClientBuilder().buildClient();
    }
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
    public final BlobInputStream openInputStream() {
        // Full blob, default request conditions — equivalent to openInputStream(null, null).
        return openInputStream(null, null);
    }
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
    public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
        // Start the async long-running copy and expose its synchronous poller adapter.
        return client.beginCopy(options).getSyncPoller();
    }
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public void abortCopyFromUrl(String copyId) {
        // No lease, no timeout, root context; response is intentionally discarded.
        abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
    }
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public void download(OutputStream stream) {
        // Kept for backwards compatibility; downloadStream is the preferred entry point.
        downloadStream(stream);
    }
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public void downloadStream(OutputStream stream) {
        // Whole blob (no range), default retry/conditions, MD5 check off, no timeout.
        downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
    }
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        // Thin alias for downloadStreamWithResponse, retained for API compatibility.
        return downloadStreamWithResponse(stream, range,
            options, requestConditions, getRangeContentMd5, timeout, context);
    }
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("stream", stream);
        // Reduce the async byte-buffer flux into the caller's OutputStream; the stream
        // is written on the reactive pipeline's thread, then the response is re-wrapped.
        Mono<BlobDownloadResponse> download = client
            .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Surface write failures as UncheckedIOException through the reactive chain.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadContentResponse downloadContentWithResponse(
        DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
        // Stream the full blob (range == null, MD5 off) and buffer it into BinaryData;
        // suitable only for payloads that fit in memory.
        Mono<BlobDownloadContentResponse> download = client
            .downloadStreamWithResponse(null, options, requestConditions, false, context)
            .flatMap(r ->
                BinaryData.fromFlux(r.getValue())
                    .map(data ->
                        // Re-wrap the streaming response with the fully buffered content.
                        new BlobDownloadContentAsyncResponse(
                            r.getRequest(), r.getStatusCode(),
                            r.getHeaders(), data,
                            r.getDeserializedHeaders())
                    ))
            .map(BlobDownloadContentResponse::new);
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties downloadToFile(String filePath) {
        // Non-destructive by default: fails if the destination file already exists.
        return downloadToFile(filePath, false);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        // Forward with null openOptions: default create-new-file semantics apply.
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Fill in defaults for any unset transfer options and re-wrap them in the
    // common-layer type expected by the options bag.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Collect every parameter into a single options bag and delegate to the
    // options-based overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block until it completes, bounded by the
    // caller's optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate with no snapshot option, no request conditions, and no timeout;
    // the Response (status/headers) is intentionally discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Delegate with no request conditions or timeout and unwrap the value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate with no request conditions or timeout; the response is discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate with no request conditions or timeout; the response is discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Delegate with default options and no timeout, unwrapping the tag map.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Wrap the tags in an options bag and delegate; the response is discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Delegate with no metadata, request conditions, or timeout and unwrap the
    // client that points at the new snapshot.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, then wrap the returned async client in a
    // synchronous BlobClientBase before blocking (with the optional timeout).
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate with no rehydrate priority, lease ID, or timeout; the response is discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Collect the individual parameters into an options bag and delegate to the
    // options-based overload.
    return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
        timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Set the tier asynchronously, then block with the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate with no timeout; the response is discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Delegate with no timeout and unwrap the account info.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch the account info asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegate SAS generation to the underlying async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Delegate SAS generation (with explicit account name and context) to the
    // underlying async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Delegate service-SAS generation to the underlying async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Delegate service-SAS generation (with context) to the underlying async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default query options, delegate, and unwrap the stream.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query to obtain the streaming response.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream while preserving the
    // original request, status code, headers, and deserialized headers.
    InputStream bodyStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), bodyStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Delegate with options built from the expression and target stream; no timeout.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options bag and its output stream are required up front.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Run the query asynchronously and drain the resulting ByteBuffer flux into the
    // caller's OutputStream via reduce, so buffers are written in order as they arrive.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface write failures as UncheckedIOException through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Delegate with no request conditions or timeout and unwrap the applied policy.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the immutability policy asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Delegate with no timeout; the Response<Void> is intentionally discarded.
    // (The previous ".getValue()" call was a no-op on a Void response and is
    // dropped for consistency with the other void convenience methods such as
    // delete() and undelete().)
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delete the immutability policy asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Delegate with no timeout and unwrap the legal hold result.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Set or clear the legal hold asynchronously, then block with the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new sync facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new sync facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Re-wrap the async client configured with the requested encryption scope.
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Re-wrap the async client configured with the requested customer-provided key.
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Pure delegation to the wrapped async client.
    String accountUrl = client.getAccountUrl();
    return accountUrl;
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    // Pure delegation to the wrapped async client.
    String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    String accountName = client.getAccountName();
    return accountName;
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    String containerName = client.getContainerName();
    return containerName;
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient} for this blob's parent container.
 */
public BlobContainerClient getContainerClient() {
    // The async client supplies a pre-configured builder for the parent container;
    // build the synchronous client from it.
    return client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    // Pure delegation to the wrapped async client.
    String blobName = client.getBlobName();
    return blobName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    CpkInfo cpkInfo = client.getCustomerProvidedKey();
    return cpkInfo;
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 * Package-private: used internally by sibling clients.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    // Pure delegation to the wrapped async client.
    String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    BlobServiceVersion serviceVersion = client.getServiceVersion();
    return serviceVersion;
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob.
 */
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob.
 */
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    String versionId = client.getVersionId();
    return versionId;
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot.
 */
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
 * Opens a blob input stream to download the entire blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // A null range and null conditions mean "read the whole blob, unconditionally".
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Translate the flat parameters into the options-bag overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/*
 * NOTE(review): The Javadoc below documents openInputStream(BlobInputStreamOptions), but no
 * implementation follows it in this chunk — confirm the method body was not accidentally dropped.
 *
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // No timeout, default (empty) context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Start the async call, then block for at most the requested timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs for the Copy Blob REST operation.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional settings (metadata, tier, priority, conditions) left at their defaults.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs for the Copy Blob REST operation.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options-bag overload. The generic source
    // conditions are converted to blob-specific conditions by ModelHelper.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs for the Copy Blob REST operation.</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling machinery; expose its synchronous poller view.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs for the Abort Copy Blob REST operation.</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, default (empty) context; the response is discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Abort Copy Blob REST operation.</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Start the async call, then block for at most the requested timeout.
    Mono<Response<Void>> abortMono = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs for the Copy Blob From URL REST operation.</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // All optional settings left at their defaults; unwrap the response value.
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Copy Blob From URL REST operation.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options-bag overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Copy Blob From URL REST operation.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Start the async call, then block for at most the requested timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Retained for backwards compatibility; downloadStream is the preferred entry point.
    downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Whole blob, default retry options, no access conditions, no range MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * <p>This method supports downloads up to 2GB of data. Use
 * {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions,
 * boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // No timeout: block until the full content is buffered in memory.
    Mono<BinaryData> contentMono = client.downloadContent();
    return blockWithOptionalTimeout(contentMono, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * <p>This method will be deprecated in the future. Use
 * {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
 * BlobRequestConditions, boolean, Duration, Context)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Retained for backwards compatibility; forwards every argument unchanged.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold the async body (a Flux of ByteBuffers) into the caller's stream, writing each buffer
    // in order, then surface the original response metadata once the body is fully drained.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap and propagate through the reactive pipeline; surfaces as UncheckedIOException.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * <p>This method supports downloads up to 2GB of data. Use {@link #downloadStreamWithResponse(OutputStream,
 * BlobRange, DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Buffer the entire body (whole blob: range == null) into a BinaryData, then re-wrap the
    // streaming response's metadata around the buffered content before blocking.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Do not overwrite an existing file by default.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // When overwriting, open the file so it is created if absent and truncated if present;
    // a null option set keeps the default create-only (fail-if-exists) behavior.
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        overwriteOptions, null, Context.NONE);
    return response.getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // No explicit OpenOptions: defer to the overload's default create-only semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-level transfer options (defaults applied) into the common type,
    // then translate everything into the options-bag overload.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs for the Get Blob REST operation.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download, then block for at most the requested timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the Azure Docs for the Delete Blob REST operation.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot handling, no access conditions, no timeout; the response is discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete; a RuntimeException is raised if the optional timeout elapses.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: no request conditions or timeout; unwrap the Response value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties via the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions or timeout, default context.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Set the headers through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions or timeout, default context.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata via the async client, blocking with the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout; unwrap the Response value.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Retrieve tags through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Convenience overload: wraps the tags in options, no timeout, default context.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags via the async client, blocking with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, no conditions, no timeout; unwrap the snapshot client.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client returned by the service in a synchronous client.
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority, lease, or timeout, default context.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options object and delegate.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Set the tier through the async client and block with the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload: no timeout, default context.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob via the async client, blocking with the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout; unwrap the Response value.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch account info through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local (signing); delegate directly to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local (signing); delegate directly to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local (signed with the shared key); delegate to the async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local (signed with the shared key); delegate to the async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: wrap the expression in default query options; unwrap the stream.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Start the query on the async client and wait for the streaming response.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream, preserving the HTTP metadata.
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Convenience overload: bundle expression and destination stream into query options.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options object and its destination stream are mandatory.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // Drain the reactive body into the caller's OutputStream; the reduce keeps writes ordered.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions or timeout; unwrap the Response value.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the policy through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response carries no value (Response<Void>), so there is nothing to unwrap;
    // dropping the superfluous getValue() call matches the other void overloads
    // such as delete() and undelete().
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Remove the policy through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Convenience overload: no timeout, default context; unwrap the Response value.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Set the legal hold through the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
Are we collecting Flux of ByteBuffer into single ByteBuffer here? If so then I'd rather collect the Flux into collection and pass it around instead to avoid mem copies until deemed necessary. | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | .map(ByteBuffer::wrap).block(); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
// ETag-based read consistency is the default when the caller requests none.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
    ? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
    ? new BlobRequestConditions() : options.getRequestConditions();
// Default range covers the whole blob starting at offset 0.
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Each service read fetches one chunk; defaults to 4MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
    new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
// Download function invoked for every subsequent chunk as the stream is consumed.
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
    (chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
    .flatMap(tuple3 -> {
        // T3 is the raw response for the first chunk; buffer its body fully in memory
        // (up to chunkSize bytes) before handing it to the BlobInputStream.
        BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
        return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
            .map(ByteBuffer::wrap)
            .zipWith(Mono.just(downloadResponse));
    })
    .flatMap(tuple2 -> {
        ByteBuffer initialBuffer = tuple2.getT1();
        BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
        BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
        String eTag = properties.getETag();
        String versionId = properties.getVersionId();
        BlobAsyncClientBase client = this.client;
        // Pin the blob view so later chunk reads observe the same data as the first chunk.
        switch (consistentReadControl) {
            case NONE:
                break;
            case ETAG:
                // Only inject If-Match when the caller has not supplied their own condition.
                if (requestConditions.getIfMatch() == null) {
                    requestConditions.setIfMatch(eTag);
                }
                break;
            case VERSION_ID:
                if (versionId == null) {
                    return FluxUtil.monoError(logger,
                        new UnsupportedOperationException("Versioning is not supported on this account."));
                } else {
                    if (this.client.getVersionId() == null) {
                        // Re-target the client at the captured version for all further reads.
                        client = this.client.getVersionClient(versionId);
                    }
                }
                break;
            default:
                return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
                    + "supported."));
        }
        return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
            requestConditions, properties));
    }).block();
}
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // Every sync operation on this class delegates to this async client and blocks on the result.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new sync facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new sync facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Wrap an async client configured with the requested encryption scope.
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Wrap an async client configured with the customer-provided key.
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Pure delegation to the wrapped async client.
    final String accountUrl = client.getAccountUrl();
    return accountUrl;
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Pure delegation to the wrapped async client.
    final String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    final String accountName = client.getAccountName();
    return accountName;
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    final String containerName = client.getContainerName();
    return containerName;
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Builds a sync container client from the async client's pre-configured builder.
    return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Pure delegation; decoding is handled by the async client.
    final String blobName = client.getBlobName();
    return blobName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    final HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    final CpkInfo cpk = client.getCustomerProvidedKey();
    return cpk;
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; pure delegation to the wrapped async client.
    final String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    final BlobServiceVersion version = client.getServiceVersion();
    return version;
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    final String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    final String versionId = client.getVersionId();
    return versionId;
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Streams the whole blob with default range and no access conditions.
    return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Package the legacy parameters into the options bag and defer to the options overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // No timeout, empty context; unwrap the value from the full response.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the generic source conditions into blob-specific ones, then defer to the options overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Adapt the async poller returned by the client into a blocking SyncPoller.
    return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, empty context; the response is intentionally discarded.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort call, honoring the optional timeout.
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // All optional parameters defaulted; unwrap the copy ID from the full response.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Package the legacy parameters into the options bag and defer to the options overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backwards compatibility; downloadStream is the preferred entry point.
    this.downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, default retry options and conditions, no MD5 validation, no timeout.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // No timeout: block until the async download completes.
    Mono<BinaryData> download = client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Compatibility alias: forwards every argument verbatim to downloadStreamWithResponse.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Fail fast on a null destination rather than inside the reactive pipeline.
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Reduce the body Flux into the caller's OutputStream, writing each buffer as it arrives,
        // then surface the original response (headers/status) once the body is fully drained.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Reactor operators cannot throw checked exceptions; wrap, log, and propagate unchecked.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Downloads the full blob (range = null, no MD5 check) and materializes the whole body in memory;
    // per the Javadoc above, this path is intended for payloads up to ~2GB.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            // Collect the streaming body into BinaryData while preserving the original
            // request, status code, headers, and deserialized headers on the response.
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // By default an existing file is not overwritten.
    return this.downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // Null open options preserve the default create-new behavior (fails if the file exists).
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Recreate the file in place: create if absent, truncate if present, read/write access.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // No custom OpenOptions: defer to the overload that accepts them, passing null.
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Apply defaults to the blob-specific transfer options and convert them to the common type.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Package everything into the options bag and defer to the options overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the destination file must not already exist; a {@link FileAlreadyExistsException} is thrown if it
 * does. Supply {@link OpenOption OpenOptions} on the options bag to override that behavior.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Kick off the async download and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, use
 * {@code deleteWithResponse} and set {@code DeleteSnapshotsOptionType} to INCLUDE.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot option, no request conditions, no timeout.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // No request conditions or timeout; unwrap the value from the full response.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased; to
 * preserve existing values they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No request conditions or timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased; to
 * preserve existing values they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata replaces all existing metadata; to preserve old values they
 * must be downloaded first and included in the call.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // No request conditions or timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata replaces all existing metadata; to preserve old values they
 * must be downloaded first and included in the call.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Default options, no timeout; unwrap the value from the full response.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags replace all existing tags; to preserve old values they must be
 * downloaded first and included in the call.
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Wrap the tags in the options bag; no timeout.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags replace all existing tags; to preserve old values they must be
 * downloaded first and included in the call.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // No metadata, request conditions, or timeout; unwrap the snapshot client from the full response.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client returned by the service into a sync client before blocking.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
 * Sets the tier on a blob. Allowed on a page blob in a premium storage account or a block blob in a blob storage
 * or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob; a
 * block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // No priority, lease, or timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. Allowed on a page blob in a premium storage account or a block blob in a blob storage
 * or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob; a
 * block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Fold the individual parameters into the options bag and delegate.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. Allowed on a page blob in a premium storage account or a block blob in a blob storage
 * or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob; a
 * block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // No timeout.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // No timeout; unwrap the value from the full response.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate to the async client's shared implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; delegate to the async client's shared implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: the client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; delegate to the async client's shared implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: the client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local; delegate to the async client's shared implementation.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in the options bag and unwrap the stream from the full response.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block on the async query; the service call must produce a response before a stream can be opened.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the reactive body into a blocking InputStream while preserving the response metadata.
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Fold expression and destination stream into the options bag; no timeout.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>The query response body is streamed reactively and written buffer-by-buffer into the caller-supplied
 * {@link OutputStream}; nothing is returned to the caller besides the response metadata.</p>
 *
 * @param queryOptions {@link BlobQueryOptions The query options}. Must be non-null and carry a non-null
 * output stream.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs while writing to the output stream.
 * @throws NullPointerException if {@code queryOptions} or its output stream is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Validate up front so a clear NullPointerException is thrown before any network call happens.
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
// reduce(...) drains the body in order, writing each buffer into the caller's stream as a side effect.
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap the checked IOException so it can propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
// Once the body is fully written, surface only the response metadata.
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // No request conditions or timeout; unwrap the value from the full response.
    return this.setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The Response<Void> carries no value; the previous trailing getValue() call was a no-op and is dropped
    // for consistency with the other void convenience overloads (e.g. undelete()).
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // No timeout; unwrap the value from the full response.
    return this.setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the corresponding async snapshot client in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the corresponding async version client in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Derive a new async client bound to the given scope, then wrap it synchronously.
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Derive a new async client bound to the given key, then wrap it synchronously.
    BlobAsyncClientBase keyedAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(keyedAsyncClient);
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Simple pass-through to the backing async client.
    return this.client.getAccountUrl();
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    // Simple pass-through to the backing async client.
    return this.client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    // Simple pass-through to the backing async client.
    return this.client.getAccountName();
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    // Simple pass-through to the backing async client.
    return this.client.getContainerName();
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    // Simple pass-through to the backing async client.
    return this.client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    // Simple pass-through to the backing async client.
    return this.client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    // Simple pass-through to the backing async client.
    return this.client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    // Package-private accessor; simple pass-through to the backing async client.
    return this.client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    // Simple pass-through to the backing async client.
    return this.client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    // Simple pass-through to the backing async client.
    return this.client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    // Simple pass-through to the backing async client.
    return this.client.getVersionId();
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    // Simple pass-through to the backing async client.
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // No range and no access conditions: stream the entire blob.
    return this.openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Fold the legacy parameters into the options bag and delegate to the options overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the WithResponse variant with no timeout and an empty context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Issue the async call, then block for at most the supplied timeout (or indefinitely when null).
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional parameters default to null; only the source URL and poll interval are supplied.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the legacy parameter list into the options bag and delegate to the options overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the long-running copy on the async client and expose its synchronous poller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST API).</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, empty context; response is intentionally discarded.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST API).</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Issue the async call, then block for at most the supplied timeout (or indefinitely when null).
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST API).</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Delegate to the WithResponse variant with all optional parameters defaulted.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST API).</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the legacy parameter list into the options bag and delegate to the options overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST API).</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Issue the async call, then block for at most the supplied timeout (or indefinitely when null).
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backward compatibility; downloadStream is the preferred entry point.
    this.downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options, no access conditions, no MD5 check, no timeout.
    downloadStreamWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * <p>This method supports downloads up to 2GB of data. Use
 * {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions,
 * boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Block without a timeout: wait until the full content has been buffered.
    Mono<BinaryData> download = client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * <p>This method will be deprecated in the future. Use
 * {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions,
 * boolean, Duration, Context)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backward compatibility; forwards verbatim to downloadStreamWithResponse.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body (a Flux of ByteBuffers) into the caller's OutputStream, writing each buffer as it
    // arrives. IOExceptions are rethrown as UncheckedIOException via the logger so they propagate out of the
    // reactive pipeline. The original response is preserved and re-wrapped once the stream has been fully written.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Block until the entire range has been written (bounded by the optional timeout).
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * <p>This method supports downloads up to 2GB of data. Use
 * {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions,
 * boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Download the full blob (range == null, no MD5 check), buffer the streamed body into BinaryData, and
    // re-wrap the response metadata (request, status, headers, deserialized headers) around the buffered content.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    // Block until the content has been fully buffered (bounded by the optional timeout).
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: do not overwrite an existing file.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (overwrite) {
        // Replace any existing file: create if absent, truncate if present, and open for read/write.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // null lets the downstream overload apply its default create-new semantics.
        openOptions = null;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward with null OpenOptions so the downstream overload applies its default create-new semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common type expected by the options bag.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs (Get Blob REST API).</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Issue the async download, then block for at most the supplied timeout (or indefinitely when null).
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the Azure Docs (Delete Blob REST API).</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot option, no access conditions, no timeout; response is intentionally discarded.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots set
 * {@code deleteBlobSnapshotOptions} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * <p>For more information, see the Azure Docs (Delete Blob REST API).</p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async call, then block for at most the supplied timeout (or indefinitely when null).
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties getProperties() {
        // Convenience overload: no request conditions, no timeout; unwraps the response value.
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Convenience overload: no request conditions, no timeout.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setMetadata(Map<String, String> metadata) {
        // Convenience overload: no request conditions, no timeout.
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Map<String, String> getTags() {
        // Convenience overload: default options, no timeout; unwraps the response value.
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setTags(Map<String, String> tags) {
        // Convenience overload: wraps the tags in an options bag; no timeout.
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier of the snapshot.
 */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobClientBase createSnapshot() {
        // Convenience overload: no metadata, no request conditions, no timeout; unwraps the snapshot client.
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setAccessTier(AccessTier tier) {
        // Convenience overload: no rehydrate priority, no lease, no timeout.
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void undelete() {
        // Convenience overload: no timeout.
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public StorageAccountInfo getAccountInfo() {
        // Convenience overload: no timeout; unwraps the response value.
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        // Delegates SAS generation to the shared async client implementation.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        // Delegates SAS generation (with explicit account name and context) to the async client.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        // Delegates service-SAS generation to the shared async client implementation.
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        // Delegates service-SAS generation (with caller-supplied context) to the async client.
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public InputStream openQueryInputStream(String expression) {
        // Convenience overload: wraps the expression in default query options; unwraps the stream.
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void query(OutputStream stream, String expression) {
        // Convenience overload: bundles the expression and output stream into options; no timeout.
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        // Both the options bag and its output stream are required.
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce the response body buffers into the caller's OutputStream; any IOException is
        // rethrown unchecked through the reactive pipeline via Exceptions.propagate.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
        // Convenience overload: no request conditions, no timeout; unwraps the response value.
        return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobLegalHoldResult setLegalHold(boolean legalHold) {
        // Convenience overload: no timeout; unwraps the response value.
        return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
    }
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} |
It can't because we always emit a value, but ci interprets a call to block as possibly returning null and requires a check | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Body of openInputStream(BlobInputStreamOptions): resolves defaults, downloads the first
// chunk eagerly, then builds a BlobInputStream for lazy ranged reads of the remainder.
// Normalize all caller-supplied options to concrete defaults.
options = options == null ? new BlobInputStreamOptions() : options;
// Default consistency mode is ETAG so subsequent chunk reads fail fast if the blob changes.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
    ? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
    ? new BlobRequestConditions() : options.getRequestConditions();
// A null range means "whole blob from offset 0".
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Chunk size defaults to 4 MB per network read.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
    new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
// Download function used for the first chunk (and reused by the stream for later chunks).
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
    (chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
    .flatMap(tuple3 -> {
        // T3 of the tuple is the raw download response for the first chunk.
        BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
        // Buffer the first chunk fully in memory and pair it with its response.
        return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
            .map(ByteBuffer::wrap)
            .zipWith(Mono.just(downloadResponse));
    })
    .flatMap(tuple2 -> {
        ByteBuffer initialBuffer = tuple2.getT1();
        BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
        BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
        String eTag = properties.getETag();
        String versionId = properties.getVersionId();
        BlobAsyncClientBase client = this.client;
        switch (consistentReadControl) {
            case NONE:
                break;
            case ETAG:
                // Lock subsequent reads to the eTag observed on the first chunk, unless the
                // caller already supplied an explicit if-match condition.
                if (requestConditions.getIfMatch() == null) {
                    requestConditions.setIfMatch(eTag);
                }
                break;
            case VERSION_ID:
                // Version pinning requires a versioning-enabled account.
                if (versionId == null) {
                    return FluxUtil.monoError(logger,
                        new UnsupportedOperationException("Versioning is not supported on this account."));
                } else {
                    // Pin to the observed version only if no explicit version was requested.
                    if (this.client.getVersionId() == null) {
                        client = this.client.getVersionClient(versionId);
                    }
                }
                break;
            default:
                return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
                    + "supported."));
        }
        return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
            requestConditions, properties));
    }).block(); // Sync client: block on the reactive pipeline.
}
// Logger scoped to this class for surfacing client-side errors.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every sync method delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client this sync client delegates all operations to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} that targets the given {@code snapshot} of this blob.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    return new BlobClientBase(this.client.getSnapshotClient(snapshot));
}
/**
 * Creates a new {@link BlobClientBase} that targets the given {@code version} of this blob.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    return new BlobClientBase(this.client.getVersionClient(versionId));
}
/**
 * Creates a new {@link BlobClientBase} configured with the given {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    return new BlobClientBase(this.client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
 * Creates a new {@link BlobClientBase} configured with the given {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    return new BlobClientBase(this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
 * Gets the URL of the storage account this blob belongs to.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return this.client.getAccountUrl();
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}
/**
 * Gets the name of the storage account associated with this resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}
/**
 * Gets the name of the container holding this blob.
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}
/**
 * Gets a client pointing to this blob's parent container.
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Gets the decoded name of this blob.
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return this.client.getEncryptionScope();
}
/**
 * Gets the service version this client targets.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}
/**
 * Gets the snapshot identifier of this blob resource, if any.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}
/**
 * Gets the version identifier of this blob resource, if any.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return this.client.getVersionId();
}
/**
 * Indicates whether this client targets a blob snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the entire blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Delegate to the ranged overload with no range or access conditions.
    return this.openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets whether the blob this client represents exists in the cloud.
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets whether the blob this client represents exists in the cloud, with the full HTTP response.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegate to the full overload with all optional parameters left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual parameters into the options bag expected by the options-based overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling logic; expose its synchronous view.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> response = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag expected by the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(this.client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method will be deprecated in the future; prefer {@code downloadStream}.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    this.downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Download the whole blob: no range, retry options, conditions, MD5 check, or timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob as {@link BinaryData}. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method supports downloads up to 2GB of data; use a streaming download for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> download = this.client.downloadContent();
    // No timeout is applied for this convenience overload.
    return blockWithOptionalTimeout(download, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method will be deprecated in the future; prefer {@code downloadStreamWithResponse}.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Pure pass-through to the preferred streaming variant.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Fail fast with NPE before any network call is made.
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Drain the body flux into the caller's stream via reduce; the stream itself is the accumulator.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        // Once the body is fully written, surface only headers/status to the caller.
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<BlobDownloadContentResponse> download = client
        // null range + false MD5 flag: whole-blob download without range MD5 verification.
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            // Collect the streamed body into a single in-memory BinaryData payload.
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    // Re-wrap the original response metadata around the buffered content.
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // overwrite=false keeps the historical create-only semantics.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    if (!overwrite) {
        // null openOptions -> default create-only behavior (fails if the file exists).
        return downloadToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // Overwrite: open the file for read/write, creating it if absent and truncating if present.
    Set<OpenOption> openOptions = new HashSet<>();
    openOptions.add(StandardOpenOption.CREATE);
    openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    openOptions.add(StandardOpenOption.READ);
    openOptions.add(StandardOpenOption.WRITE);
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward to the overload that also accepts OpenOptions, with no custom open options.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Apply the SDK defaults, then translate the blob-specific transfer options to the common type.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown unless overriding {@link OpenOption OpenOptions} are
 * supplied.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block the caller, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot handling, no conditions, no timeout: simplest form of the delete operation.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, set
 * {@code deleteBlobSnapshotOptions} to INCLUDE.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Unconditional fetch: no request conditions, no timeout.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    Response<Map<String, String>> response = getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the blob's tags.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    BlobSetTagsOptions options = new BlobSetTagsOptions(tags);
    this.setTagsWithResponse(options, null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Issue the async update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @return A {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client returned by the service in a sync client before handing it back.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into the options object the canonical overload accepts.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Issue the async tier change and block, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Issue the async restore and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values, obtained from the
 * service via the service client's get-user-delegation-key operation.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; delegate to the shared async-client implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values, obtained from the
 * service via the service client's get-user-delegation-key operation.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; delegate to the shared async-client implementation.
    return client.generateUserDelegationSas(
        blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; delegate to the shared async-client implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local; delegate to the shared async-client implementation.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Block for the async query response so the body can be exposed as a synchronous InputStream.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    // block() returning null indicates the reactive pipeline completed without emitting a response.
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>The query is executed asynchronously and each chunk of the result is written to the caller-supplied
 * {@link BlobQueryOptions#getOutputStream() output stream} as it arrives; this method blocks until the whole
 * result has been written (or the optional timeout elapses).</p>
 *
 * @param queryOptions {@link BlobQueryOptions The query options}. Must be non-null and carry a non-null output
 * stream.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs while writing to the output stream.
 * @throws NullPointerException if {@code queryOptions} or its output stream is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Validate eagerly so a missing stream fails before any network work starts.
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
// reduce() drains the body Flux in order, appending every buffer to the caller's stream.
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap the checked IOException so it can propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
// Once the body is fully written, surface only the response metadata to the caller.
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response carries no value (Response<Void>); calling getValue() on it was superfluous and is dropped
    // for consistency with the other void wrappers such as delete() and undelete().
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Issue the async delete and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
 * Sets a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout,
    Context context) {
    // Issue the async update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob together with its snapshots, use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a>.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
// No snapshot option, no access conditions, no timeout.
deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot, returning the full service response. To delete a blob together with
 * its snapshots, set {@code deleteBlobSnapshotOptions} to {@code Include}.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a>.</p>
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Fetches the blob's properties and metadata.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a>.</p>
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
// Convenience overload: no access conditions, no timeout.
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Fetches the blob's properties and metadata, returning the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
// Delegate to the async client and block, honoring the optional timeout.
return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a>.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
// No access conditions, no timeout.
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties, returning the full service response. If only one HTTP header is
 * updated, the others will all be erased; pass existing values alongside the header being changed to keep them.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a>.</p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
// No access conditions, no timeout.
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata, returning the full service response. The specified metadata replaces existing
 * metadata entirely; download and include old values to preserve them.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Returns the blob's user-defined tags.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a>.</p>
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
// Convenience overload: default options, no timeout.
return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's user-defined tags along with the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a>.</p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
// Delegate to the async client and block, honoring the optional timeout.
return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a>.</p>
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
// No timeout; wraps the tags in the options bag expected by the WithResponse overload.
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags, returning the full service response. The specified tags replace existing tags
 * entirely; download and include old values to preserve them.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a>.</p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a>.</p>
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot; use
 * {@code getSnapshotId()} to get the identifier for the snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
// No metadata, no access conditions, no timeout.
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob, returning the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a>.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot; use
 * {@code getSnapshotId()} to get the identifier for the snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
// The async client returns an async snapshot client; wrap it in the sync type before blocking.
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a>.</p>
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
// No priority, no lease, no timeout.
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob, returning the full service response. See {@link #setAccessTier(AccessTier)} for the
 * semantics of tiering. This does not update the blob's etag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a>.</p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
// Bridges the flat-parameter overload to the options-bag overload.
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
 * Sets the tier on a blob using an options bag, returning the full service response. See
 * {@link #setAccessTier(AccessTier)} for the semantics of tiering. This does not update the blob's etag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a>.</p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
 * Restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a>.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
// Convenience overload: no timeout.
undeleteWithResponse(null, Context.NONE);
}
/**
 * Restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots,
 * returning the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a>.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a>.</p>
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
// No timeout.
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account, along with the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a>.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values; obtained from the
 * service (see the service client's get-user-delegation-key API).
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
// SAS generation is purely local; delegates to the async client's implementation.
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues},
 * with an explicit account name and context.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values; obtained from the
 * service (see the service client's get-user-delegation-key API).
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
// SAS generation is purely local; delegates to the async client's implementation.
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: the client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
// SAS generation is purely local; delegate to the async client's implementation.
return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} and context.
 * <p>Note: the client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
// SAS generation is purely local; delegate to the async client's implementation.
return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens a blob input stream to query the blob, returning the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
// Blocks only to obtain the response envelope; the body is consumed lazily through FluxInputStream.
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
// block() on a Mono may yield null if the publisher completes empty; fail loudly rather than NPE later.
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream, returning the full service response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a>.</p>
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// reduce() writes each buffer to the caller's stream as a side effect; buffers arrive in order on a
// single subscriber, so sequential writes are safe here.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap the checked IOException so it can propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
// No access conditions, no timeout.
return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version, returning the full service response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot or blob version.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
// The response carries no value (Response<Void>); the previous getValue() call was dead code.
deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot or blob version, returning the full service response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
// No timeout.
return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
 * Sets a legal hold on the blob, returning the full service response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} |
// NOTE(review): the text before the first '|' on the next line appears to be review/dataset metadata
// fused onto the method signature during extraction — this span is not valid Java as-is; the original
// bytes are preserved verbatim below.
which tool reports that? is it possible to suppress this warning inline @alzimmermsft ? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Normalize the options bag and fill in defaults for each field the caller left unset.
options = options == null ? new BlobInputStreamOptions() : options;
// Default consistency model is ETAG (lock the stream to the version read first).
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default chunk size is 4 MB per Constants.MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
// NOTE(review): this method calls block() twice (first-chunk download, then buffer collection);
// composing both into a single reactive chain and blocking once would be preferable — TODO confirm.
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
// Materialize the first chunk eagerly so the returned stream has data to serve immediately.
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Pin subsequent reads to the ETag observed on the first chunk, unless the caller already set one.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Pin subsequent reads to the specific blob version observed on the first chunk.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
}
// NOTE(review): this span is the reactive rewrite of openInputStream; its signature line was garbled by
// extraction (fused onto the previous line with '|' separators). Original bytes preserved verbatim.
// Normalize the options bag and fill in defaults for each field the caller left unset.
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default chunk size is 4 MB per Constants.MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
// Single reactive chain: download the first chunk, collect its bytes, then build the stream; only one
// block() at the very end.
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Pin subsequent reads to the ETag observed on the first chunk, unless already set.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
// Errors surface through the reactive chain rather than being thrown directly.
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
    /**
     * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return a {@link BlobClientBase} used to interact with the specific snapshot.
     */
    public BlobClientBase getSnapshotClient(String snapshot) {
        // Wraps the async client's snapshot-scoped client in a new synchronous wrapper.
        return new BlobClientBase(client.getSnapshotClient(snapshot));
    }
    /**
     * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
     *
     * @param versionId the identifier for a specific version of this blob,
     * pass {@code null} to interact with the latest blob version.
     * @return a {@link BlobClientBase} used to interact with the specific version.
     */
    public BlobClientBase getVersionClient(String versionId) {
        return new BlobClientBase(client.getVersionClient(versionId));
    }
    /**
     * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
     *
     * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
     * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
     */
    public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
        return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
    }
    /**
     * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
     *
     * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
     * pass {@code null} to use no customer provided key.
     * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
     */
    public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
        return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
    }
    /**
     * Get the url of the storage account.
     *
     * @return the URL of the storage account
     */
    public String getAccountUrl() {
        return client.getAccountUrl();
    }
    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        return client.getBlobUrl();
    }
    /**
     * Get associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return client.getAccountName();
    }
    /**
     * Get the container name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        return client.getContainerName();
    }
    /**
     * Gets a client pointing to the parent container.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
     *
     * @return {@link BlobContainerClient}
     */
    public BlobContainerClient getContainerClient() {
        // Builds a new synchronous container client from the async client's container client builder.
        return client.getContainerClientBuilder().buildClient();
    }
    /**
     * Decodes and gets the blob name.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        return client.getBlobName();
    }
    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return client.getHttpPipeline();
    }
    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return client.getCustomerProvidedKey();
    }
    /**
     * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
     * Package-private: only used within the library.
     *
     * @return the encryption scope used for encryption.
     */
    String getEncryptionScope() {
        return client.getEncryptionScope();
    }
    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        return client.getServiceVersion();
    }
    /**
     * Gets the snapshotId for a blob resource.
     *
     * @return A string that represents the snapshotId of the snapshot blob
     */
    public String getSnapshotId() {
        return client.getSnapshotId();
    }
    /**
     * Gets the versionId for a blob resource.
     *
     * @return A string that represents the versionId of the snapshot blob
     */
    public String getVersionId() {
        return client.getVersionId();
    }
    /**
     * Determines if a blob is a snapshot.
     *
     * @return A boolean that indicates if a blob is a snapshot
     */
    public boolean isSnapshot() {
        return client.isSnapshot();
    }
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
    /**
     * Copies the data at the source URL to a blob.
     * <p>
     * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
     * the source is in another account, the source must either be public or authenticated with a SAS token. If the
     * source is in the same account, the Shared Key authorization on the destination will also be applied to the
     * source. The source URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
     *
     * @param options {@link BlobBeginCopyOptions}
     * @return A {@link SyncPoller} to poll the progress of blob copy operation.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
        // Expose the async client's long-running operation as a synchronous poller.
        return client.beginCopy(options).getSyncPoller();
    }
    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
     *
     * @param copyId The id of the copy operation to abort.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void abortCopyFromUrl(String copyId) {
        // No lease, no timeout, default context.
        abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
    }
    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
     *
     * @param copyId The id of the copy operation to abort.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
        Context context) {
        return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
    }
    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
     * attached. The URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
     *
     * @param copySource The source URL to copy from.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public String copyFromUrl(String copySource) {
        // All optional parameters left unset; default context.
        return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
    }
    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
     * attached. The URL must be URL encoded.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
     *
     * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
     * any metadata key or value, it must be removed or encoded.
     * @param tier {@link AccessTier} for the destination blob.
     * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
     * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
     * related to when the blob was changed relative to the given request. The request will fail if the specified
     * condition is not satisfied.
     * @param destRequestConditions {@link BlobRequestConditions} against the destination.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
        RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
        Duration timeout, Context context) {
        // Wrap the flat parameter list into the options bag accepted by the canonical overload.
        return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
            .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
            .setDestinationRequestConditions(destRequestConditions), timeout, context);
    }
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void download(OutputStream stream) {
        downloadStream(stream);
    }
    /**
     * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void downloadStream(OutputStream stream) {
        // Whole blob (null range), no retry options, no conditions, no MD5 validation.
        downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
    }
    /**
     * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * <p>This method supports downloads up to 2GB of data.
     * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions,
     * boolean, Duration, Context)} to download larger blobs.</p>
     *
     * @return The content of the blob.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BinaryData downloadContent() {
        // Null timeout: block until the download completes.
        return blockWithOptionalTimeout(client.downloadContent(), null);
    }
    /**
     * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * <p>This method will be deprecated in the future.
     * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
     * BlobRequestConditions, boolean, Duration, Context)} instead.
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        // Pass through to the preferred overload.
        return downloadStreamWithResponse(stream, range,
            options, requestConditions, getRangeContentMd5, timeout, context);
    }
    /**
     * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("stream", stream);
        // Reduce the async body into the caller's OutputStream, then surface only the response metadata.
        Mono<BlobDownloadResponse> download = client
            .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as unchecked so the error can cross the reactive boundary.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
    /**
     * Downloads the entire blob and returns its content. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * <p>This method supports downloads up to 2GB of data.
     * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange,
     * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
     *
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadContentResponse downloadContentWithResponse(
        DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
        // Download the whole blob (null range), buffer it into BinaryData, and re-wrap the response
        // so callers get content plus the original headers/status.
        Mono<BlobDownloadContentResponse> download = client
            .downloadStreamWithResponse(null, options, requestConditions, false, context)
            .flatMap(r ->
                BinaryData.fromFlux(r.getValue())
                    .map(data ->
                        new BlobDownloadContentAsyncResponse(
                            r.getRequest(), r.getStatusCode(),
                            r.getHeaders(), data,
                            r.getDeserializedHeaders())
                    ))
            .map(BlobDownloadContentResponse::new);
        return blockWithOptionalTimeout(download, timeout);
    }
    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
     * will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @return The blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties downloadToFile(String filePath) {
        // Default: do not overwrite an existing file.
        return downloadToFile(filePath, false);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
     * will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
     * transfers parameter is ignored.
     * @param downloadRetryOptions {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        // Delegate with null openOptions, preserving the default file-must-not-exist semantics.
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void delete() {
        // Delete with defaults: no snapshot option, no access conditions, no timeout.
        deleteWithResponse(null, null, null, Context.NONE);
    }
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Set headers with defaults: no access conditions, no timeout, empty context.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setMetadata(Map<String, String> metadata) {
        // Replace metadata with defaults: no access conditions, no timeout, empty context.
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setAccessTier(AccessTier tier) {
        // Set the tier with defaults: no rehydrate priority, no lease, no timeout.
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void undelete() {
        // Restore with defaults: no timeout and an empty context.
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        // SAS generation is a local signing operation; no service call is made here.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        // SAS generation is a local signing operation; context flows only through the signing code.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        // Signs locally with the shared-key credential held by the async client; no service call.
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        // Signs locally with the shared-key credential held by the async client; no service call.
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        // Both the options bag and its output stream are required; fail fast with a clear message.
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce the async body (a stream of ByteBuffers) into the caller's OutputStream, then
        // surface the original response metadata once every buffer has been written.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    // reduce emits buffers sequentially, so writes are ordered and single-threaded.
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap the checked IOException so it can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
    protected BlobClientBase(BlobAsyncClientBase client) {
        // Every synchronous operation on this class delegates to the async client and blocks.
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the async client's snapshot-scoped view in a new synchronous client.
    return new BlobClientBase(this.client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the async client's version-scoped view in a new synchronous client.
    return new BlobClientBase(this.client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Re-wrap the async client configured with the requested encryption scope.
    return new BlobClientBase(this.client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Re-wrap the async client configured with the requested customer-provided key.
    return new BlobClientBase(this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // The account endpoint comes straight from the wrapped async client.
    return this.client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // The blob endpoint comes straight from the wrapped async client.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Delegates to the async client, which tracks the storage account name.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Delegates to the async client, which tracks the parent container name.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Delegates to the async client, which holds the (decoded) blob name.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Both sync and async clients share the same underlying pipeline.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Delegates to the async client, which stores the configured CPK, if any.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; the scope (if any) lives on the async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // The negotiated service version is held by the async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Null unless this client is scoped to a specific snapshot.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Null unless this client is scoped to a specific blob version.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // True when this client was created against a snapshot of the blob.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // No range or access conditions: stream the entire blob with default options.
    return openInputStream(new BlobInputStreamOptions());
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Package the legacy parameters into the options bag used by the primary overload.
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Query with default conditions and unwrap the boolean payload.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(this.client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters are left unset; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag accepted by the primary overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        // The generic RequestConditions are adapted to blob-specific source conditions.
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Obtain the async poller and adapt it to the synchronous polling interface.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease id or timeout; the response (status/headers only) is discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Name the async call before blocking, bounded by the optional timeout.
    Mono<Response<Void>> response = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Synchronous copy with all optional parameters defaulted; return just the copy id.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options bag accepted by the primary overload.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(this.client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Legacy name; forwards to downloadStream (see javadoc: slated for deprecation).
    downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full-blob download with default options; the response details are discarded.
    downloadStreamWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Block with no timeout on the async download of the entire blob's content.
    Mono<BinaryData> download = this.client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Legacy name; pure forwarder to downloadStreamWithResponse (see javadoc: slated for deprecation).
    return downloadStreamWithResponse(stream, range,
        options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // The stream is required; fail fast before issuing any network call.
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Drain the body Flux into the caller's stream; reduce keeps writes sequential.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface write failures as unchecked I/O errors through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Block until the whole body has been written, bounded by the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<BlobDownloadContentResponse> download = client
        // Full-blob download (null range), no range MD5.
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        // Buffer the body Flux into BinaryData, preserving the original response metadata.
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    // Block until the content is fully buffered, bounded by the optional timeout.
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: never overwrite an existing file.
    boolean overwrite = false;
    return downloadToFile(filePath, overwrite);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (!overwrite) {
        // Null options keep the default behavior of failing when the file already exists.
        openOptions = null;
    } else {
        // Allow replacing an existing file: create if missing, truncate, then read/write.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    Response<BlobProperties> response =
        downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE);
    return response.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forwards to the overload that also accepts OpenOptions, passing null for the default
    // create-new-file behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Fill in default transfer options and adapt the blob-specific type to the common one.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Translate the flat parameter list into the options bag accepted by the primary overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, bounding the wait by the optional timeout.
    return blockWithOptionalTimeout(this.client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete with no snapshot option or access conditions; the response is discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
     *
     * @return The blob properties and metadata.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties getProperties() {
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
     *
     * @param headers {@link BlobHttpHeaders}
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setHttpHeaders(BlobHttpHeaders headers) {
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
     *
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setMetadata(Map<String, String> metadata) {
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's tags.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
     *
     * @return The blob's tags.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Map<String, String> getTags() {
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
     *
     * @param tags Tags to associate with the blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setTags(Map<String, String> tags) {
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
     *
     * @return A {@link BlobClientBase} which is used to interact with the created snapshot; use
     * {@code getSnapshotId()} on it to get the identifier for the snapshot.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobClientBase createSnapshot() {
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
     *
     * @param tier The new tier for the blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setAccessTier(AccessTier tier) {
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void undelete() {
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a></p>
     *
     * @return The sku name and account kind.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public StorageAccountInfo getAccountInfo() {
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
     * See {@code BlobServiceClient.getUserDelegationKey} for more information on how to get a user delegation key.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
     * See {@code BlobServiceClient.getUserDelegationKey} for more information on how to get a user delegation key.
     * @param accountName The account name.
     * @param context Additional context that is passed through the code when generating a SAS.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param context Additional context that is passed through the code when generating a SAS.
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
    /**
     * Opens a blob input stream to query the blob.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
     *
     * @param expression The query expression.
     * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public InputStream openQueryInputStream(String expression) {
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param expression The query expression.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void query(OutputStream stream, String expression) {
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
     *
     * @param queryOptions {@link BlobQueryOptions The query options}.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Drain the reactive response body into the caller-supplied OutputStream, then surface
        // the HTTP response metadata once the write is complete.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as unchecked so it can propagate out of the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
    /**
     * Sets the immutability policy on a blob, blob snapshot or blob version.
     * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
     * immutable storage with versioning enabled to call this API.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy}
     *
     * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
     * @return The immutability policy.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
        return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets a legal hold on the blob.
     * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
     * immutable storage with versioning enabled to call this API.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold}
     *
     * @param legalHold Whether or not you want a legal hold on the blob.
     * @return The legal hold result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobLegalHoldResult setLegalHold(boolean legalHold) {
        return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
    }
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.