comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
so complicated? | void shouldConfigureConsumerPrefetchCount() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(TestProcessorContainerConfiguration.class))
.withPropertyValues(
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.prefetch-count=150",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.destination=dest",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.group=group",
"spring.cloud.stream.eventhubs.namespace=namespace"
)
.run(context -> {
EventHubsExtendedBindingProperties properties = context.getBean(EventHubsExtendedBindingProperties.class);
EventHubsConsumerProperties consumerProperties = properties.getExtendedConsumerProperties("consume-in-0");
assertThat(consumerProperties.getPrefetchCount()).isEqualTo(150);
TestEventHubsMessageChannelBinder channelBinder = context.getBean(TestEventHubsMessageChannelBinder.class);
assertThat(channelBinder).isNotNull();
ConsumerDestination destination = mock(ConsumerDestination.class);
when(destination.getName()).thenReturn("dest");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
channelBinder.setCheckpointStore(checkpointStore);
EventHubsInboundChannelAdapter channelAdapter = (EventHubsInboundChannelAdapter) channelBinder.createConsumerEndpoint(
destination, "test", new ExtendedConsumerProperties<>(consumerProperties));
TestEventHubsProcessorContainer processorContainer =
(TestEventHubsProcessorContainer) ReflectionTestUtils.getField(channelAdapter, "processorContainer");
TestDefaultEventHubsNamespaceProcessorFactory processorFactory = (TestDefaultEventHubsNamespaceProcessorFactory) processorContainer.getProcessorFactory();
TestEventProcessorClientBuilderFactory clientBuilderFactory =
(TestEventProcessorClientBuilderFactory) processorFactory.createEventProcessorClientBuilderFactory(null, consumerProperties);
EventProcessorClientBuilder processorClientBuilder = clientBuilderFactory.build();
processorClientBuilder.buildEventProcessorClient();
verify(processorClientBuilder, times(1)).prefetchCount(150);
});
} | EventHubsExtendedBindingProperties properties = context.getBean(EventHubsExtendedBindingProperties.class); | void shouldConfigureConsumerPrefetchCount() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(TestProcessorContainerConfiguration.class))
.withPropertyValues(
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.prefetch-count=150",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.destination=dest",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.group=group",
"spring.cloud.stream.eventhubs.namespace=namespace"
)
.run(context -> {
EventHubsExtendedBindingProperties properties = context.getBean(EventHubsExtendedBindingProperties.class);
EventHubsConsumerProperties consumerProperties = properties.getExtendedConsumerProperties("consume-in-0");
assertThat(consumerProperties.getPrefetchCount()).isEqualTo(150);
TestEventHubsMessageChannelBinder channelBinder = context.getBean(TestEventHubsMessageChannelBinder.class);
assertThat(channelBinder).isNotNull();
ConsumerDestination destination = mock(ConsumerDestination.class);
when(destination.getName()).thenReturn("dest");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
channelBinder.setCheckpointStore(checkpointStore);
EventHubsInboundChannelAdapter channelAdapter = (EventHubsInboundChannelAdapter) channelBinder.createConsumerEndpoint(
destination, "test", new ExtendedConsumerProperties<>(consumerProperties));
TestEventHubsProcessorContainer processorContainer =
(TestEventHubsProcessorContainer) ReflectionTestUtils.getField(channelAdapter, "processorContainer");
TestDefaultEventHubsNamespaceProcessorFactory processorFactory = (TestDefaultEventHubsNamespaceProcessorFactory) processorContainer.getProcessorFactory();
TestEventProcessorClientBuilderFactory clientBuilderFactory =
(TestEventProcessorClientBuilderFactory) processorFactory.createEventProcessorClientBuilderFactory(null, consumerProperties);
EventProcessorClientBuilder processorClientBuilder = clientBuilderFactory.build();
processorClientBuilder.buildEventProcessorClient();
verify(processorClientBuilder, times(1)).prefetchCount(150);
});
} | class EventHubsBinderConfigurationTests {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(EventHubsBinderConfiguration.class));
@Test
void configurationNotMatchedWhenBinderBeanExist() {
this.contextRunner
.withBean(Binder.class, () -> mock(Binder.class))
.run(context -> {
assertThat(context).doesNotHaveBean(EventHubsBinderConfiguration.class);
assertThat(context).doesNotHaveBean(EventHubsMessageChannelBinder.class);
});
}
@Test
void shouldConfigureDefaultChannelProvisionerWhenNoResourceManagerProvided() {
this.contextRunner
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isNotInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
void shouldConfigureArmChannelProvisionerWhenResourceManagerProvided() {
AzureEventHubsProperties properties = new AzureEventHubsProperties();
properties.setNamespace("test");
this.contextRunner
.withBean(EventHubsProvisioner.class, () -> mock(EventHubsProvisioner.class))
.withBean(AzureEventHubsProperties.class, () -> properties)
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
@Configuration
@EnableConfigurationProperties(EventHubsExtendedBindingProperties.class)
static class TestProcessorContainerConfiguration {
@Bean
public TestEventHubsMessageChannelBinder eventHubBinder(EventHubsExtendedBindingProperties bindingProperties,
ObjectProvider<NamespaceProperties> namespaceProperties,
ObjectProvider<CheckpointStore> checkpointStores) {
EventHubsConsumerProperties consumerProperties = bindingProperties.getExtendedConsumerProperties(
"consume-in-0");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
TestDefaultEventHubsNamespaceProcessorFactory factory = spy(new TestDefaultEventHubsNamespaceProcessorFactory(
checkpointStore, new NamespaceProperties(), (key) -> {
consumerProperties.setEventHubName(key.getDestination());
consumerProperties.setConsumerGroup(key.getGroup());
return consumerProperties;
}));
TestEventHubsProcessorContainer container = spy(new TestEventHubsProcessorContainer(factory));
EventHubsInboundChannelAdapter messageProducer = spy(new EventHubsInboundChannelAdapter(container,
"dest", "group", consumerProperties.getCheckpoint()));
TestEventHubsMessageChannelBinder binder = new TestEventHubsMessageChannelBinder(null,
new EventHubsChannelProvisioner(), null, messageProducer);
binder.setBindingProperties(bindingProperties);
binder.setNamespaceProperties(namespaceProperties.getIfAvailable());
checkpointStores.ifAvailable(binder::setCheckpointStore);
return binder;
}
}
static class TestDefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private DefaultEventHubsNamespaceProcessorFactory delegate;
public AbstractAzureAmqpClientBuilderFactory<EventProcessorClientBuilder> createEventProcessorClientBuilderFactory(EventProcessingListener listener,
ProcessorProperties processorProperties) {
TestEventProcessorClientBuilderFactory factory =
new TestEventProcessorClientBuilderFactory(processorProperties);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
return factory;
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
TestDefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.delegate = new DefaultEventHubsNamespaceProcessorFactory(checkpointStore, namespaceProperties, supplier);
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return this.delegate.createProcessor(eventHub, consumerGroup, listener);
}
@Override
public void destroy() {
this.delegate.destroy();
}
@Override
public void addListener(EventHubsProcessorFactory.Listener listener) {
this.delegate.addListener(listener);
}
@Override
public boolean removeListener(EventHubsProcessorFactory.Listener listener) {
return this.delegate.removeListener(listener);
}
}
static class TestEventHubsProcessorContainer extends EventHubsProcessorContainer {
private EventHubsProcessorFactory processorFactory;
/**
* Create an instance using the supplied processor factory.
*
* @param processorFactory the processor factory.
*/
TestEventHubsProcessorContainer(EventHubsProcessorFactory processorFactory) {
super(processorFactory);
this.processorFactory = processorFactory;
}
public EventHubsProcessorFactory getProcessorFactory() {
return processorFactory;
}
}
static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory {
TestEventProcessorClientBuilderFactory(EventProcessorClientProperties eventProcessorClientProperties) {
super(eventProcessorClientProperties, null, mock(EventProcessingListener.class));
}
@Override
public EventProcessorClientBuilder createBuilderInstance() {
return mock(EventProcessorClientBuilder.class);
}
}
} | class EventHubsBinderConfigurationTests {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(EventHubsBinderConfiguration.class));
@Test
void configurationNotMatchedWhenBinderBeanExist() {
this.contextRunner
.withBean(Binder.class, () -> mock(Binder.class))
.run(context -> {
assertThat(context).doesNotHaveBean(EventHubsBinderConfiguration.class);
assertThat(context).doesNotHaveBean(EventHubsMessageChannelBinder.class);
});
}
@Test
void shouldConfigureDefaultChannelProvisionerWhenNoResourceManagerProvided() {
this.contextRunner
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isNotInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
void shouldConfigureArmChannelProvisionerWhenResourceManagerProvided() {
AzureEventHubsProperties properties = new AzureEventHubsProperties();
properties.setNamespace("test");
this.contextRunner
.withBean(EventHubsProvisioner.class, () -> mock(EventHubsProvisioner.class))
.withBean(AzureEventHubsProperties.class, () -> properties)
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
@Configuration
@EnableConfigurationProperties(EventHubsExtendedBindingProperties.class)
static class TestProcessorContainerConfiguration {
@Bean
public TestEventHubsMessageChannelBinder eventHubBinder(EventHubsExtendedBindingProperties bindingProperties,
ObjectProvider<NamespaceProperties> namespaceProperties,
ObjectProvider<CheckpointStore> checkpointStores) {
EventHubsConsumerProperties consumerProperties = bindingProperties.getExtendedConsumerProperties(
"consume-in-0");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
TestDefaultEventHubsNamespaceProcessorFactory factory = spy(new TestDefaultEventHubsNamespaceProcessorFactory(
checkpointStore, new NamespaceProperties(), (key) -> {
consumerProperties.setEventHubName(key.getDestination());
consumerProperties.setConsumerGroup(key.getGroup());
return consumerProperties;
}));
TestEventHubsProcessorContainer container = spy(new TestEventHubsProcessorContainer(factory));
EventHubsInboundChannelAdapter messageProducer = spy(new EventHubsInboundChannelAdapter(container,
"dest", "group", consumerProperties.getCheckpoint()));
TestEventHubsMessageChannelBinder binder = new TestEventHubsMessageChannelBinder(null,
new EventHubsChannelProvisioner(), null, messageProducer);
binder.setBindingProperties(bindingProperties);
binder.setNamespaceProperties(namespaceProperties.getIfAvailable());
checkpointStores.ifAvailable(binder::setCheckpointStore);
return binder;
}
}
static class TestDefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private DefaultEventHubsNamespaceProcessorFactory delegate;
public AbstractAzureAmqpClientBuilderFactory<EventProcessorClientBuilder> createEventProcessorClientBuilderFactory(EventProcessingListener listener,
ProcessorProperties processorProperties) {
TestEventProcessorClientBuilderFactory factory =
new TestEventProcessorClientBuilderFactory(processorProperties);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
return factory;
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
TestDefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.delegate = new DefaultEventHubsNamespaceProcessorFactory(checkpointStore, namespaceProperties, supplier);
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return this.delegate.createProcessor(eventHub, consumerGroup, listener);
}
@Override
public void destroy() {
this.delegate.destroy();
}
@Override
public void addListener(EventHubsProcessorFactory.Listener listener) {
this.delegate.addListener(listener);
}
@Override
public boolean removeListener(EventHubsProcessorFactory.Listener listener) {
return this.delegate.removeListener(listener);
}
}
static class TestEventHubsProcessorContainer extends EventHubsProcessorContainer {
private EventHubsProcessorFactory processorFactory;
/**
* Create an instance using the supplied processor factory.
*
* @param processorFactory the processor factory.
*/
TestEventHubsProcessorContainer(EventHubsProcessorFactory processorFactory) {
super(processorFactory);
this.processorFactory = processorFactory;
}
public EventHubsProcessorFactory getProcessorFactory() {
return processorFactory;
}
}
static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory {
TestEventProcessorClientBuilderFactory(EventProcessorClientProperties eventProcessorClientProperties) {
super(eventProcessorClientProperties, null, mock(EventProcessingListener.class));
}
@Override
public EventProcessorClientBuilder createBuilderInstance() {
return mock(EventProcessorClientBuilder.class);
}
}
} |
This method `prefetchCount` is our target to verify, then the builder `EventProcessorClientBuilder` should be a mock object. If using `spy` the factory class, then the real builder will be created. | void customPrefetchCount() {
TestAzureEventHubsProperties properties = createMinimalServiceProperties();
properties.getProcessor().setPrefetchCount(150);
final TestEventProcessorClientBuilderFactory builderFactory = new TestEventProcessorClientBuilderFactory(properties);
final EventProcessorClientBuilder builder = builderFactory.build();
verify(builder, times(1)).prefetchCount(150);
} | final TestEventProcessorClientBuilderFactory builderFactory = new TestEventProcessorClientBuilderFactory(properties); | void customPrefetchCount() {
TestAzureEventHubsProperties properties = createMinimalServiceProperties();
properties.getProcessor().setPrefetchCount(150);
final TestEventProcessorClientBuilderFactory builderFactory = new TestEventProcessorClientBuilderFactory(properties);
final EventProcessorClientBuilder builder = builderFactory.build();
verify(builder, times(1)).prefetchCount(150);
} | class EventProcessorClientBuilderFactoryTests extends AzureServiceClientBuilderFactoryBaseTests<EventProcessorClientBuilder,
TestAzureEventHubsProperties, EventProcessorClientBuilderFactory> {
@Override
protected TestAzureEventHubsProperties createMinimalServiceProperties() {
return new TestAzureEventHubsProperties();
}
@Test
static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory {
TestEventProcessorClientBuilderFactory(TestAzureEventHubsProperties properties) {
super(properties.getProcessor(), null, mock(EventProcessingListener.class));
}
@Override
public EventProcessorClientBuilder createBuilderInstance() {
return mock(EventProcessorClientBuilder.class);
}
}
} | class EventProcessorClientBuilderFactoryTests extends AzureServiceClientBuilderFactoryBaseTests<EventProcessorClientBuilder,
TestAzureEventHubsProperties, EventProcessorClientBuilderFactory> {
@Override
protected TestAzureEventHubsProperties createMinimalServiceProperties() {
return new TestAzureEventHubsProperties();
}
@Test
static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory {
TestEventProcessorClientBuilderFactory(TestAzureEventHubsProperties properties) {
super(properties.getProcessor(), null, mock(EventProcessingListener.class));
}
@Override
public EventProcessorClientBuilder createBuilderInstance() {
return mock(EventProcessorClientBuilder.class);
}
}
} |
Yes, too complex, I don't find a better way to replace the mock builder. | void shouldConfigureConsumerPrefetchCount() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(TestProcessorContainerConfiguration.class))
.withPropertyValues(
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.prefetch-count=150",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.destination=dest",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.group=group",
"spring.cloud.stream.eventhubs.namespace=namespace"
)
.run(context -> {
EventHubsExtendedBindingProperties properties = context.getBean(EventHubsExtendedBindingProperties.class);
EventHubsConsumerProperties consumerProperties = properties.getExtendedConsumerProperties("consume-in-0");
assertThat(consumerProperties.getPrefetchCount()).isEqualTo(150);
TestEventHubsMessageChannelBinder channelBinder = context.getBean(TestEventHubsMessageChannelBinder.class);
assertThat(channelBinder).isNotNull();
ConsumerDestination destination = mock(ConsumerDestination.class);
when(destination.getName()).thenReturn("dest");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
channelBinder.setCheckpointStore(checkpointStore);
EventHubsInboundChannelAdapter channelAdapter = (EventHubsInboundChannelAdapter) channelBinder.createConsumerEndpoint(
destination, "test", new ExtendedConsumerProperties<>(consumerProperties));
TestEventHubsProcessorContainer processorContainer =
(TestEventHubsProcessorContainer) ReflectionTestUtils.getField(channelAdapter, "processorContainer");
TestDefaultEventHubsNamespaceProcessorFactory processorFactory = (TestDefaultEventHubsNamespaceProcessorFactory) processorContainer.getProcessorFactory();
TestEventProcessorClientBuilderFactory clientBuilderFactory =
(TestEventProcessorClientBuilderFactory) processorFactory.createEventProcessorClientBuilderFactory(null, consumerProperties);
EventProcessorClientBuilder processorClientBuilder = clientBuilderFactory.build();
processorClientBuilder.buildEventProcessorClient();
verify(processorClientBuilder, times(1)).prefetchCount(150);
});
} | EventHubsExtendedBindingProperties properties = context.getBean(EventHubsExtendedBindingProperties.class); | void shouldConfigureConsumerPrefetchCount() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(TestProcessorContainerConfiguration.class))
.withPropertyValues(
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.prefetch-count=150",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.destination=dest",
"spring.cloud.stream.eventhubs.bindings.consume-in-0.consumer.group=group",
"spring.cloud.stream.eventhubs.namespace=namespace"
)
.run(context -> {
EventHubsExtendedBindingProperties properties = context.getBean(EventHubsExtendedBindingProperties.class);
EventHubsConsumerProperties consumerProperties = properties.getExtendedConsumerProperties("consume-in-0");
assertThat(consumerProperties.getPrefetchCount()).isEqualTo(150);
TestEventHubsMessageChannelBinder channelBinder = context.getBean(TestEventHubsMessageChannelBinder.class);
assertThat(channelBinder).isNotNull();
ConsumerDestination destination = mock(ConsumerDestination.class);
when(destination.getName()).thenReturn("dest");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
channelBinder.setCheckpointStore(checkpointStore);
EventHubsInboundChannelAdapter channelAdapter = (EventHubsInboundChannelAdapter) channelBinder.createConsumerEndpoint(
destination, "test", new ExtendedConsumerProperties<>(consumerProperties));
TestEventHubsProcessorContainer processorContainer =
(TestEventHubsProcessorContainer) ReflectionTestUtils.getField(channelAdapter, "processorContainer");
TestDefaultEventHubsNamespaceProcessorFactory processorFactory = (TestDefaultEventHubsNamespaceProcessorFactory) processorContainer.getProcessorFactory();
TestEventProcessorClientBuilderFactory clientBuilderFactory =
(TestEventProcessorClientBuilderFactory) processorFactory.createEventProcessorClientBuilderFactory(null, consumerProperties);
EventProcessorClientBuilder processorClientBuilder = clientBuilderFactory.build();
processorClientBuilder.buildEventProcessorClient();
verify(processorClientBuilder, times(1)).prefetchCount(150);
});
} | class EventHubsBinderConfigurationTests {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(EventHubsBinderConfiguration.class));
@Test
void configurationNotMatchedWhenBinderBeanExist() {
this.contextRunner
.withBean(Binder.class, () -> mock(Binder.class))
.run(context -> {
assertThat(context).doesNotHaveBean(EventHubsBinderConfiguration.class);
assertThat(context).doesNotHaveBean(EventHubsMessageChannelBinder.class);
});
}
@Test
void shouldConfigureDefaultChannelProvisionerWhenNoResourceManagerProvided() {
this.contextRunner
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isNotInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
void shouldConfigureArmChannelProvisionerWhenResourceManagerProvided() {
AzureEventHubsProperties properties = new AzureEventHubsProperties();
properties.setNamespace("test");
this.contextRunner
.withBean(EventHubsProvisioner.class, () -> mock(EventHubsProvisioner.class))
.withBean(AzureEventHubsProperties.class, () -> properties)
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
@Configuration
@EnableConfigurationProperties(EventHubsExtendedBindingProperties.class)
static class TestProcessorContainerConfiguration {
@Bean
public TestEventHubsMessageChannelBinder eventHubBinder(EventHubsExtendedBindingProperties bindingProperties,
ObjectProvider<NamespaceProperties> namespaceProperties,
ObjectProvider<CheckpointStore> checkpointStores) {
EventHubsConsumerProperties consumerProperties = bindingProperties.getExtendedConsumerProperties(
"consume-in-0");
CheckpointStore checkpointStore = mock(CheckpointStore.class);
TestDefaultEventHubsNamespaceProcessorFactory factory = spy(new TestDefaultEventHubsNamespaceProcessorFactory(
checkpointStore, new NamespaceProperties(), (key) -> {
consumerProperties.setEventHubName(key.getDestination());
consumerProperties.setConsumerGroup(key.getGroup());
return consumerProperties;
}));
TestEventHubsProcessorContainer container = spy(new TestEventHubsProcessorContainer(factory));
EventHubsInboundChannelAdapter messageProducer = spy(new EventHubsInboundChannelAdapter(container,
"dest", "group", consumerProperties.getCheckpoint()));
TestEventHubsMessageChannelBinder binder = new TestEventHubsMessageChannelBinder(null,
new EventHubsChannelProvisioner(), null, messageProducer);
binder.setBindingProperties(bindingProperties);
binder.setNamespaceProperties(namespaceProperties.getIfAvailable());
checkpointStores.ifAvailable(binder::setCheckpointStore);
return binder;
}
}
static class TestDefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private DefaultEventHubsNamespaceProcessorFactory delegate;
public AbstractAzureAmqpClientBuilderFactory<EventProcessorClientBuilder> createEventProcessorClientBuilderFactory(EventProcessingListener listener,
ProcessorProperties processorProperties) {
TestEventProcessorClientBuilderFactory factory =
new TestEventProcessorClientBuilderFactory(processorProperties);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
return factory;
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
TestDefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.delegate = new DefaultEventHubsNamespaceProcessorFactory(checkpointStore, namespaceProperties, supplier);
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return this.delegate.createProcessor(eventHub, consumerGroup, listener);
}
@Override
public void destroy() {
this.delegate.destroy();
}
@Override
public void addListener(EventHubsProcessorFactory.Listener listener) {
this.delegate.addListener(listener);
}
@Override
public boolean removeListener(EventHubsProcessorFactory.Listener listener) {
return this.delegate.removeListener(listener);
}
}
static class TestEventHubsProcessorContainer extends EventHubsProcessorContainer {
private EventHubsProcessorFactory processorFactory;
/**
* Create an instance using the supplied processor factory.
*
* @param processorFactory the processor factory.
*/
TestEventHubsProcessorContainer(EventHubsProcessorFactory processorFactory) {
super(processorFactory);
this.processorFactory = processorFactory;
}
public EventHubsProcessorFactory getProcessorFactory() {
return processorFactory;
}
}
static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory {
TestEventProcessorClientBuilderFactory(EventProcessorClientProperties eventProcessorClientProperties) {
super(eventProcessorClientProperties, null, mock(EventProcessingListener.class));
}
@Override
public EventProcessorClientBuilder createBuilderInstance() {
return mock(EventProcessorClientBuilder.class);
}
}
} | class EventHubsBinderConfigurationTests {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(EventHubsBinderConfiguration.class));
@Test
void configurationNotMatchedWhenBinderBeanExist() {
this.contextRunner
.withBean(Binder.class, () -> mock(Binder.class))
.run(context -> {
assertThat(context).doesNotHaveBean(EventHubsBinderConfiguration.class);
assertThat(context).doesNotHaveBean(EventHubsMessageChannelBinder.class);
});
}
@Test
void shouldConfigureDefaultChannelProvisionerWhenNoResourceManagerProvided() {
this.contextRunner
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isNotInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
void shouldConfigureArmChannelProvisionerWhenResourceManagerProvided() {
AzureEventHubsProperties properties = new AzureEventHubsProperties();
properties.setNamespace("test");
this.contextRunner
.withBean(EventHubsProvisioner.class, () -> mock(EventHubsProvisioner.class))
.withBean(AzureEventHubsProperties.class, () -> properties)
.run(context -> {
assertThat(context).hasSingleBean(EventHubsBinderConfiguration.class);
assertThat(context).hasSingleBean(EventHubsExtendedBindingProperties.class);
assertThat(context).hasSingleBean(EventHubsChannelProvisioner.class);
assertThat(context).hasSingleBean(EventHubsMessageChannelBinder.class);
EventHubsChannelProvisioner channelProvisioner = context.getBean(EventHubsChannelProvisioner.class);
assertThat(channelProvisioner).isInstanceOf(EventHubsChannelResourceManagerProvisioner.class);
});
}
@Test
// Registers a fully spied-up test binder so tests can reach inside the consumer
// endpoint it creates (container -> processor factory -> client builder factory).
@Configuration
@EnableConfigurationProperties(EventHubsExtendedBindingProperties.class)
static class TestProcessorContainerConfiguration {

    @Bean
    public TestEventHubsMessageChannelBinder eventHubBinder(EventHubsExtendedBindingProperties bindingProperties,
                                                            ObjectProvider<NamespaceProperties> namespaceProperties,
                                                            ObjectProvider<CheckpointStore> checkpointStores) {
        EventHubsConsumerProperties consumerProperties = bindingProperties.getExtendedConsumerProperties(
            "consume-in-0");
        // Mocked checkpoint store: no real storage is touched by these tests.
        CheckpointStore checkpointStore = mock(CheckpointStore.class);
        // The supplier mutates and returns the binding's consumer properties so the
        // factory always observes the destination/group of the key it is asked about.
        TestDefaultEventHubsNamespaceProcessorFactory factory = spy(new TestDefaultEventHubsNamespaceProcessorFactory(
            checkpointStore, new NamespaceProperties(), (key) -> {
                consumerProperties.setEventHubName(key.getDestination());
                consumerProperties.setConsumerGroup(key.getGroup());
                return consumerProperties;
            }));
        TestEventHubsProcessorContainer container = spy(new TestEventHubsProcessorContainer(factory));
        EventHubsInboundChannelAdapter messageProducer = spy(new EventHubsInboundChannelAdapter(container,
            "dest", "group", consumerProperties.getCheckpoint()));
        TestEventHubsMessageChannelBinder binder = new TestEventHubsMessageChannelBinder(null,
            new EventHubsChannelProvisioner(), null, messageProducer);
        binder.setBindingProperties(bindingProperties);
        binder.setNamespaceProperties(namespaceProperties.getIfAvailable());
        // Only install a checkpoint store when the context actually provides one.
        checkpointStores.ifAvailable(binder::setCheckpointStore);
        return binder;
    }
}
/**
 * Test double for {@code EventHubsProcessorFactory} that delegates all real work to a
 * {@link DefaultEventHubsNamespaceProcessorFactory} while exposing a hook that builds the
 * {@link EventProcessorClientBuilderFactory} a processor would be created from, so tests
 * can inspect the configuration applied to it.
 */
static class TestDefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {

    // The real factory; assigned once in the constructor, never reassigned.
    private final DefaultEventHubsNamespaceProcessorFactory delegate;

    /**
     * Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
     * @param checkpointStore the checkpoint store.
     * @param namespaceProperties the namespace properties.
     * @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
     */
    TestDefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
                                                  NamespaceProperties namespaceProperties,
                                                  PropertiesSupplier<ConsumerIdentifier,
                                                      ProcessorProperties> supplier) {
        Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
        this.delegate = new DefaultEventHubsNamespaceProcessorFactory(checkpointStore, namespaceProperties, supplier);
    }

    /**
     * Builds the client-builder factory a test can introspect, tagged with the same Spring
     * identifier the production integration code uses.
     */
    public AbstractAzureAmqpClientBuilderFactory<EventProcessorClientBuilder> createEventProcessorClientBuilderFactory(EventProcessingListener listener,
                                                                                                                       ProcessorProperties processorProperties) {
        TestEventProcessorClientBuilderFactory factory =
            new TestEventProcessorClientBuilderFactory(processorProperties);
        factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
        return factory;
    }

    @Override
    public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
                                                @NonNull EventProcessingListener listener) {
        return this.delegate.createProcessor(eventHub, consumerGroup, listener);
    }

    @Override
    public void destroy() {
        this.delegate.destroy();
    }

    @Override
    public void addListener(EventHubsProcessorFactory.Listener listener) {
        this.delegate.addListener(listener);
    }

    @Override
    public boolean removeListener(EventHubsProcessorFactory.Listener listener) {
        return this.delegate.removeListener(listener);
    }
}
/**
 * Test processor container that remembers the factory it was constructed with so tests
 * can unwrap it and inspect the builder factory used for a given binding.
 */
static class TestEventHubsProcessorContainer extends EventHubsProcessorContainer {

    // Assigned once at construction and never reassigned, hence final.
    private final EventHubsProcessorFactory processorFactory;

    /**
     * Create an instance using the supplied processor factory.
     *
     * @param processorFactory the processor factory.
     */
    TestEventHubsProcessorContainer(EventHubsProcessorFactory processorFactory) {
        super(processorFactory);
        this.processorFactory = processorFactory;
    }

    /**
     * @return the processor factory this container was constructed with.
     */
    public EventHubsProcessorFactory getProcessorFactory() {
        return processorFactory;
    }
}
// Builder factory whose product is a mock, so no real EventProcessorClientBuilder
// (and therefore no network connection) is ever created in these tests.
static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory {

    TestEventProcessorClientBuilderFactory(EventProcessorClientProperties eventProcessorClientProperties) {
        // No checkpoint store (null) and a mocked listener are sufficient for inspection.
        super(eventProcessorClientProperties, null, mock(EventProcessingListener.class));
    }

    @Override
    public EventProcessorClientBuilder createBuilderInstance() {
        return mock(EventProcessorClientBuilder.class);
    }
}
} |
https://github.com/Azure/azure-sdk-for-java/pull/26819/commits/2b5858f08801314162458b7e44339a06a353727a Handling separately policies perCall and perRetry. Please verify. | public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
} | this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null.")); | public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
// Fail fast on a null policy with a descriptive message.
Objects.requireNonNull(policy, "'policy' cannot be null.");
// Route the policy by its declared pipeline position: PER_CALL policies are applied
// once per logical request; everything else is treated as per-retry and re-runs on
// each retry attempt when the pipeline is assembled.
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
    perCallPolicies.add(policy);
} else {
    perRetryPolicies.add(policy);
}
return this;
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline pipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code endpoint} is {@code null}.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
    // Required field: validateRequiredFields() rejects builds without it.
    this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
    // Mutually exclusive with the TokenCredential overload; createAuthenticationPolicy
    // throws if both credentials end up set.
    this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
    return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
    // Mutually exclusive with the AzureKeyCredential overload; createAuthenticationPolicy
    // throws if both credentials end up set.
    this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
    return this;
}
/**
 * Set the endpoint and AzureKeyCredential for authorization.
 *
 * @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
 * @return The updated {@link SipRoutingClientBuilder} object.
 * @throws NullPointerException If {@code connectionString} is {@code null}.
 */
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    // Parse once, then reuse the existing endpoint/credential setters.
    CommunicationConnectionString parsed = new CommunicationConnectionString(connectionString);
    this.endpoint(parsed.getEndpoint())
        .credential(new AzureKeyCredential(parsed.getAccessKey()));
    return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code clientOptions} is {@code null}.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
    // Also consulted for the application id when the user agent policy is built.
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
 * Create synchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional HttpPolicies specified by additionalPolicies will be applied after them.
 *
 * @return {@link SipRoutingClient} instance.
 * @throws NullPointerException If the required endpoint has not been configured.
 * @throws IllegalStateException If both retry options and a retry policy have been set
 * (enforced when the pipeline is assembled).
 */
public SipRoutingClient buildClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Parameterized logging defers formatting and fixes the missing space the
        // previous string concatenation produced ("...service version1.0").
        logger.info("Build client for service version {}", this.version.getVersion());
    }
    return createClientImpl(createAdminClientImpl());
}
/**
 * Create asynchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional HttpPolicies specified by additionalPolicies will be applied after them.
 *
 * @return {@link SipRoutingAsyncClient} instance.
 * @throws NullPointerException If the required endpoint has not been configured.
 * @throws IllegalStateException If both retry options and a retry policy have been set
 * (enforced when the pipeline is assembled).
 */
public SipRoutingAsyncClient buildAsyncClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Parameterized logging defers formatting and fixes the missing space the
        // previous string concatenation produced ("...service version1.0").
        logger.info("Build client for service version {}", this.version.getVersion());
    }
    return createAsyncClientImpl(createAdminClientImpl());
}
// Package-private construction seams — presumably so tests can substitute the
// concrete clients; TODO confirm against the test suite.
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
    return new SipRoutingClient(adminClientImpl);
}

SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
    return new SipRoutingAsyncClient(adminClientImpl);
}
// Chooses the auth policy from whichever credential was configured; exactly one of
// tokenCredential / azureKeyCredential must be set.
// NOTE(review): the bearer-token scope string literal on the line below was truncated
// after "https:" in this copy of the file — restore the full scope URL from VCS.
HttpPipelinePolicy createAuthenticationPolicy() {
    if (this.tokenCredential != null && this.azureKeyCredential != null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
    }
    if (this.tokenCredential != null) {
        return new BearerTokenAuthenticationPolicy(
            this.tokenCredential, "https:
    } else if (this.azureKeyCredential != null) {
        return new HmacAuthenticationPolicy(this.azureKeyCredential);
    } else {
        throw logger.logExceptionAsError(
            new NullPointerException("Missing credential information while building a client."));
    }
}
// Package-private policy factory seams used by createHttpPipeline(); presumably
// overridable in tests to observe/replace individual policies — TODO confirm.
UserAgentPolicy createUserAgentPolicy(
    String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
    return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}

HttpPipelinePolicy createRequestIdPolicy() {
    return new RequestIdPolicy();
}

CookiePolicy createCookiePolicy() {
    return new CookiePolicy();
}

HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
    return new HttpLoggingPolicy(httpLogOptions);
}

HttpLogOptions createDefaultHttpLogOptions() {
    return new HttpLogOptions();
}
/**
 * Fails fast before any client is constructed when mandatory builder state is missing.
 *
 * @throws NullPointerException If the endpoint has not been set.
 */
private void validateRequiredFields() {
    // Message added: a bare requireNonNull produced an NPE with no hint of which
    // builder field was missing.
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
}
// Assembles the generated admin client from the configured endpoint and the pipeline
// built (or passed through) by createHttpPipeline().
private SipRoutingAdminClientImpl createAdminClientImpl() {
    return new SipRoutingAdminClientImplBuilder()
        .endpoint(this.endpoint)
        .pipeline(this.createHttpPipeline())
        .buildClient();
}
// Builds the HTTP pipeline for the admin client. An explicitly supplied pipeline always
// wins; otherwise one is assembled from the builder's settings.
private HttpPipeline createHttpPipeline() {
    if (this.pipeline != null) {
        return this.pipeline;
    }
    List<HttpPipelinePolicy> policyList = new ArrayList<>();
    ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    // Application id preference: ClientOptions first, then HttpLogOptions.
    String applicationId = null;
    if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
        applicationId = buildClientOptions.getApplicationId();
    } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
        applicationId = buildLogOptions.getApplicationId();
    }
    // Fixed order: user agent -> request id -> retry -> auth -> cookies.
    policyList.add(this.createUserAgentPolicy(
        applicationId,
        PROPERTIES.get(SDK_NAME),
        PROPERTIES.get(SDK_VERSION),
        this.configuration
    ));
    policyList.add(this.createRequestIdPolicy());
    // Throws IllegalStateException when both retryPolicy and retryOptions were set.
    policyList.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
    policyList.add(this.createAuthenticationPolicy());
    policyList.add(this.createCookiePolicy());
    // NOTE(review): all additional policies are appended here, after the retry policy,
    // regardless of each policy's HttpPipelinePosition — PER_CALL policies are never
    // hoisted before retry. Confirm whether per-call/per-retry placement should be
    // honored (cf. the addPolicy variant that splits perCall/perRetry lists).
    if (this.additionalPolicies.size() > 0) {
        policyList.addAll(this.additionalPolicies);
    }
    policyList.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policyList.toArray(new HttpPipelinePolicy[0]))
        .httpClient(this.httpClient)
        .clientOptions(clientOptions)
        .build();
}
// Lazily materializes log options so pipeline construction always sees a non-null value.
private HttpLogOptions getHttpLogOptions() {
    HttpLogOptions options = this.httpLogOptions;
    if (options == null) {
        options = this.createDefaultHttpLogOptions();
        this.httpLogOptions = options;
    }
    return options;
}
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or it cannot be parsed into a valid URL.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
    try {
        // Validation only; the parsed URL is discarded. A null endpoint also lands in
        // the catch block because new URL(null) throws MalformedURLException.
        new URL(endpoint);
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
    }
    this.endpoint = endpoint;
    return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingClient(adminClientImpl);
}
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
UserAgentPolicy createUserAgentPolicy(
String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
HttpPipelinePolicy createRequestIdPolicy() {
return new RequestIdPolicy();
}
CookiePolicy createCookiePolicy() {
return new CookiePolicy();
}
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
return new HttpLoggingPolicy(httpLogOptions);
}
HttpLogOptions createDefaultHttpLogOptions() {
return new HttpLogOptions();
}
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
private HttpPipeline createHttpPipeline() {
if (this.httpPipeline != null) {
return this.httpPipeline;
}
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = CoreUtils.getApplicationId(buildClientOptions, buildLogOptions);
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policies.add(this.createRequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policies.add(this.createAuthenticationPolicy());
policies.add(this.createCookiePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} |
We have dedicated schedulers for bulkExecutor, rntbd (CosmosSchedulers.cs), so I wonder should we also use a dedicated one for changeFeedProcessor. | public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
} | this.scheduler = Schedulers.boundedElastic(); | public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets he interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval he interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it's start from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
* Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @param startFromBeginning Indicates to start from beginning if true
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
this.startFromBeginning = startFromBeginning;
return this;
}
/**
* Gets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @return the minimum scale count for the host.
*/
public int getMinScaleCount() {
return this.minScaleCount;
}
/**
* Sets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @param minScaleCount the minimum partition count for the host.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
this.minScaleCount = minScaleCount;
return this;
}
/**
* Gets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @return the maximum number of partitions (parallel workers) the host can run.
*/
public int getMaxScaleCount() {
return this.maxScaleCount;
}
/**
* Sets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
this.maxScaleCount = maxScaleCount;
return this;
}
/**
* Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers..
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
}
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets he interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval he interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it's start from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
* Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @param startFromBeginning Indicates to start from beginning if true
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
this.startFromBeginning = startFromBeginning;
return this;
}
/**
* Gets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @return the minimum scale count for the host.
*/
public int getMinScaleCount() {
return this.minScaleCount;
}
/**
* Sets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @param minScaleCount the minimum partition count for the host.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
this.minScaleCount = minScaleCount;
return this;
}
/**
* Gets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @return the maximum number of partitions (parallel workers) the host can run.
*/
public int getMaxScaleCount() {
return this.maxScaleCount;
}
/**
* Sets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
this.maxScaleCount = maxScaleCount;
return this;
}
/**
* Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers..
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
if (scheduler == null) {
throw new IllegalArgumentException("scheduler");
}
this.scheduler = scheduler;
return this;
}
} |
so if the maxScaleCount is 0, then we will use non-greedy algorithm, else keep use the greedy algorithm? why do we need to have a different handling logic here? what is the scenario the non-greedy algorithm will help? and will the scenario happen even when customer has set the maxScaleCount? | public List<Lease> selectLeasesToTake(List<Lease> allLeases) {
Map<String, Integer> workerToPartitionCount = new HashMap<>();
List<Lease> expiredLeases = new ArrayList<>();
Map<String, Lease> allPartitions = new HashMap<>();
this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount);
int partitionCount = allPartitions.size();
int workerCount = workerToPartitionCount.size();
if (partitionCount <= 0) {
return new ArrayList<Lease>();
}
int target = this.calculateTargetPartitionCount(partitionCount, workerCount);
int myCount = workerToPartitionCount.get(this.hostName);
int partitionsNeededForMe = target - myCount;
if (expiredLeases.size() > 0) {
if (this.maxPartitionCount == 0 && (partitionsNeededForMe <= 0 || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1))) {
partitionsNeededForMe = 1;
}
for (Lease lease : expiredLeases) {
this.logger.info("Found unused or expired lease {}; previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ",
lease.getLeaseToken(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount);
}
return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size()));
}
if (partitionsNeededForMe <= 0)
return new ArrayList<Lease>();
Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions);
List<Lease> stolenLeases = new ArrayList<>();
if (stolenLease != null) {
stolenLeases.add(stolenLease);
}
return stolenLeases;
} | if (this.maxPartitionCount == 0 && (partitionsNeededForMe <= 0 || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1))) { | public List<Lease> selectLeasesToTake(List<Lease> allLeases) {
Map<String, Integer> workerToPartitionCount = new HashMap<>();
List<Lease> expiredLeases = new ArrayList<>();
Map<String, Lease> allPartitions = new HashMap<>();
this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount);
int partitionCount = allPartitions.size();
int workerCount = workerToPartitionCount.size();
if (partitionCount <= 0) {
return new ArrayList<Lease>();
}
int target = this.calculateTargetPartitionCount(partitionCount, workerCount);
int myCount = workerToPartitionCount.get(this.hostName);
int partitionsNeededForMe = target - myCount;
if (expiredLeases.size() > 0) {
if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0) || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) {
partitionsNeededForMe = 1;
}
if (partitionsNeededForMe == 1) {
Random random = new Random();
Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size()));
this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ",
expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount);
return Collections.singletonList(expiredLease);
} else {
for (Lease lease : expiredLeases) {
this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ",
lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount);
}
}
return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size()));
}
if (partitionsNeededForMe <= 0)
return new ArrayList<Lease>();
Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions);
List<Lease> stolenLeases = new ArrayList<>();
if (stolenLease != null) {
stolenLeases.add(stolenLease);
}
return stolenLeases;
} | class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy {
private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class);
private final String hostName;
private final int minPartitionCount;
private final int maxPartitionCount;
private final Duration leaseExpirationInterval;
public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) {
if (hostName == null) {
throw new IllegalArgumentException("hostName");
}
this.hostName = hostName;
this.minPartitionCount = minPartitionCount;
this.maxPartitionCount = maxPartitionCount;
this.leaseExpirationInterval = leaseExpirationInterval;
}
@Override
private static Lease getLeaseToSteal(
Map<String, Integer> workerToPartitionCount,
int target,
int partitionsNeededForMe,
Map<String, Lease> allPartitions) {
Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount);
if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 1 : 0)) {
for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) {
if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) {
return entry.getValue();
}
}
}
return null;
}
private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) {
Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0);
for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) {
if (workerToStealFrom.getValue() <= entry.getValue()) {
workerToStealFrom = entry;
}
}
return workerToStealFrom;
}
private int calculateTargetPartitionCount(int partitionCount, int workerCount) {
int target = 1;
if (partitionCount > workerCount) {
target = (int)Math.ceil((double)partitionCount / workerCount);
}
if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) {
target = this.maxPartitionCount;
}
if (this.minPartitionCount > 0 && target < this.minPartitionCount) {
target = this.minPartitionCount;
}
return target;
}
private void categorizeLeases(
List<Lease> allLeases,
Map<String, Lease> allPartitions,
List<Lease> expiredLeases,
Map<String, Integer> workerToPartitionCount) {
for (Lease lease : allLeases) {
allPartitions.put(lease.getLeaseToken(), lease);
if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) {
expiredLeases.add(lease);
} else {
String assignedTo = lease.getOwner();
Integer count = workerToPartitionCount.get(assignedTo);
if (count != null) {
workerToPartitionCount.replace(assignedTo, count + 1);
} else {
workerToPartitionCount.put(assignedTo, 1);
}
}
}
if (!workerToPartitionCount.containsKey(this.hostName)) {
workerToPartitionCount.put(this.hostName, 0);
}
}
private boolean isExpired(Lease lease) {
if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) {
return true;
}
Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval);
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now());
return leaseExpireTime.isBefore(Instant.now());
}
} | class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy {
private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class);
private final String hostName;
private final int minPartitionCount;
private final int maxPartitionCount;
private final Duration leaseExpirationInterval;
public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) {
if (hostName == null) {
throw new IllegalArgumentException("hostName");
}
this.hostName = hostName;
this.minPartitionCount = minPartitionCount;
this.maxPartitionCount = maxPartitionCount;
this.leaseExpirationInterval = leaseExpirationInterval;
}
@Override
private static Lease getLeaseToSteal(
Map<String, Integer> workerToPartitionCount,
int target,
int partitionsNeededForMe,
Map<String, Lease> allPartitions) {
Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount);
if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 1 : 0)) {
for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) {
if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) {
return entry.getValue();
}
}
}
return null;
}
private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) {
Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0);
for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) {
if (workerToStealFrom.getValue() <= entry.getValue()) {
workerToStealFrom = entry;
}
}
return workerToStealFrom;
}
private int calculateTargetPartitionCount(int partitionCount, int workerCount) {
int target = 1;
if (partitionCount > workerCount) {
target = (int)Math.ceil((double)partitionCount / workerCount);
}
if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) {
target = this.maxPartitionCount;
}
if (this.minPartitionCount > 0 && target < this.minPartitionCount) {
target = this.minPartitionCount;
}
return target;
}
private void categorizeLeases(
List<Lease> allLeases,
Map<String, Lease> allPartitions,
List<Lease> expiredLeases,
Map<String, Integer> workerToPartitionCount) {
for (Lease lease : allLeases) {
allPartitions.put(lease.getLeaseToken(), lease);
if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) {
expiredLeases.add(lease);
} else {
String assignedTo = lease.getOwner();
Integer count = workerToPartitionCount.get(assignedTo);
if (count != null) {
workerToPartitionCount.replace(assignedTo, count + 1);
} else {
workerToPartitionCount.put(assignedTo, 1);
}
}
}
if (!workerToPartitionCount.containsKey(this.hostName)) {
workerToPartitionCount.put(this.hostName, 0);
}
}
private boolean isExpired(Lease lease) {
if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) {
return true;
}
Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval);
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now());
return leaseExpireTime.isBefore(Instant.now());
}
} |
I think it will be helpful to always log even though there is no lease to take | private Mono<Void> run(CancellationToken cancellationToken) {
return Flux.just(this)
.flatMap(value -> this.leaseContainer.getAllLeases())
.collectList()
.flatMap(allLeases -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
if (leasesToTake.size() > 0) {
this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
}
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return Flux.fromIterable(leasesToTake)
.flatMap(lease -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return this.partitionController.addOrUpdateLease(lease);
})
.then(Mono.just(this)
.flatMap(value -> {
if (cancellationToken.isCancellationRequested()) {
return Mono.empty();
}
Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval);
return Mono.just(value)
.delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
.repeat( () -> {
Instant currentTime = Instant.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
);
})
.onErrorResume(throwable -> {
logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable);
return Mono.empty();
})
.repeat(() -> {
return !cancellationToken.isCancellationRequested();
})
.then()
.onErrorResume(throwable -> {
logger.info("Partition load balancer task stopped.");
return this.stop();
});
} | this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); | private Mono<Void> run(CancellationToken cancellationToken) {
return Flux.just(this)
.flatMap(value -> this.leaseContainer.getAllLeases())
.collectList()
.flatMap(allLeases -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
if (leasesToTake.size() > 0) {
this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
}
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return Flux.fromIterable(leasesToTake)
.limitRate(1)
.flatMap(lease -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return this.partitionController.addOrUpdateLease(lease);
})
.then(Mono.just(this)
.flatMap(value -> {
if (cancellationToken.isCancellationRequested()) {
return Mono.empty();
}
Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval);
return Mono.just(value)
.delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
.repeat( () -> {
Instant currentTime = Instant.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
);
})
.onErrorResume(throwable -> {
logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable);
return Mono.empty();
})
.repeat(() -> {
return !cancellationToken.isCancellationRequested();
})
.then()
.onErrorResume(throwable -> {
logger.info("Partition load balancer task stopped.");
return this.stop();
});
} | class PartitionLoadBalancerImpl implements PartitionLoadBalancer {
private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class);
private final PartitionController partitionController;
private final LeaseContainer leaseContainer;
private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy;
private final Duration leaseAcquireInterval;
private final Scheduler scheduler;
private CancellationTokenSource cancellationTokenSource;
private volatile boolean started;
private final Object lock;
public PartitionLoadBalancerImpl(
PartitionController partitionController,
LeaseContainer leaseContainer,
PartitionLoadBalancingStrategy partitionLoadBalancingStrategy,
Duration leaseAcquireInterval,
Scheduler scheduler) {
if (partitionController == null) {
throw new IllegalArgumentException("partitionController");
}
if (leaseContainer == null) {
throw new IllegalArgumentException("leaseContainer");
}
if (partitionLoadBalancingStrategy == null) {
throw new IllegalArgumentException("partitionLoadBalancingStrategy");
}
if (scheduler == null) {
throw new IllegalArgumentException("executorService");
}
this.partitionController = partitionController;
this.leaseContainer = leaseContainer;
this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy;
this.leaseAcquireInterval = leaseAcquireInterval;
this.scheduler = scheduler;
this.started = false;
this.lock = new Object();
}
@Override
public Mono<Void> start() {
synchronized (lock) {
if (this.started) {
throw new IllegalStateException("Partition load balancer already started");
}
this.cancellationTokenSource = new CancellationTokenSource();
this.started = true;
}
return Mono.fromRunnable( () -> {
scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe());
});
}
@Override
public Mono<Void> stop() {
synchronized (lock) {
this.started = false;
this.cancellationTokenSource.cancel();
}
return this.partitionController.shutdown();
}
@Override
public boolean isRunning() {
return this.started;
}
} | class PartitionLoadBalancerImpl implements PartitionLoadBalancer {
private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class);
private final PartitionController partitionController;
private final LeaseContainer leaseContainer;
private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy;
private final Duration leaseAcquireInterval;
private final Scheduler scheduler;
private CancellationTokenSource cancellationTokenSource;
private volatile boolean started;
private final Object lock;
public PartitionLoadBalancerImpl(
PartitionController partitionController,
LeaseContainer leaseContainer,
PartitionLoadBalancingStrategy partitionLoadBalancingStrategy,
Duration leaseAcquireInterval,
Scheduler scheduler) {
if (partitionController == null) {
throw new IllegalArgumentException("partitionController");
}
if (leaseContainer == null) {
throw new IllegalArgumentException("leaseContainer");
}
if (partitionLoadBalancingStrategy == null) {
throw new IllegalArgumentException("partitionLoadBalancingStrategy");
}
if (scheduler == null) {
throw new IllegalArgumentException("executorService");
}
this.partitionController = partitionController;
this.leaseContainer = leaseContainer;
this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy;
this.leaseAcquireInterval = leaseAcquireInterval;
this.scheduler = scheduler;
this.started = false;
this.lock = new Object();
}
@Override
public Mono<Void> start() {
synchronized (lock) {
if (this.started) {
throw new IllegalStateException("Partition load balancer already started");
}
this.cancellationTokenSource = new CancellationTokenSource();
this.started = true;
}
return Mono.fromRunnable( () -> {
scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe());
});
}
@Override
public Mono<Void> stop() {
synchronized (lock) {
this.started = false;
this.cancellationTokenSource.cancel();
}
return this.partitionController.shutdown();
}
@Override
public boolean isRunning() {
return this.started;
}
} |
null check? | public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
} | this.scheduler = scheduler; | public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
if (scheduler == null) {
throw new IllegalArgumentException("scheduler");
}
this.scheduler = scheduler;
return this;
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
}
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets he interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval he interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it's start from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
* Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @param startFromBeginning Indicates to start from beginning if true
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
this.startFromBeginning = startFromBeginning;
return this;
}
/**
* Gets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @return the minimum scale count for the host.
*/
public int getMinScaleCount() {
return this.minScaleCount;
}
/**
* Sets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @param minScaleCount the minimum partition count for the host.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
this.minScaleCount = minScaleCount;
return this;
}
/**
* Gets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @return the maximum number of partitions (parallel workers) the host can run.
*/
public int getMaxScaleCount() {
return this.maxScaleCount;
}
/**
* Sets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
this.maxScaleCount = maxScaleCount;
return this;
}
/**
* Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers..
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
}
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
 * Sets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
 *
 * @param leaseAcquireInterval the interval to kick off a task to compute if partitions are distributed evenly
 * among known host instances.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
    this.leaseAcquireInterval = leaseAcquireInterval;
    return this;
}
/**
 * Returns the interval for which a lease is taken on a lease representing a partition.
 *
 * <p>
 * A lease that is not renewed within this interval expires, and ownership of its partition moves to
 * another {@link ChangeFeedProcessor} instance.
 *
 * @return the interval for which the lease is taken on a lease representing a partition.
 */
public Duration getLeaseExpirationInterval() {
    return leaseExpirationInterval;
}
/**
 * Configures the interval for which a lease is taken on a lease representing a partition.
 *
 * <p>
 * A lease that is not renewed within this interval expires, and ownership of its partition moves to
 * another {@link ChangeFeedProcessor} instance.
 *
 * @param leaseExpirationInterval the lease expiration interval to use.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
    this.leaseExpirationInterval = leaseExpirationInterval;
    return this;
}
/**
 * Returns the delay between polls of a partition for new changes on the feed, applied after all
 * current changes have been drained.
 *
 * @return the delay in between polling a partition for new changes on the feed.
 */
public Duration getFeedPollDelay() {
    return feedPollDelay;
}
/**
 * Configures the delay between polls of a partition for new changes on the feed, applied after all
 * current changes have been drained.
 *
 * @param feedPollDelay the poll delay to apply once all current changes are drained.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
    this.feedPollDelay = feedPollDelay;
    return this;
}
/**
 * Returns the prefix used as part of the lease ID.
 * <p>
 * The prefix makes it possible for multiple {@link ChangeFeedProcessor} instances pointing at the
 * same feed to share a single auxiliary container.
 *
 * @return a prefix to be used as part of the lease ID.
 */
public String getLeasePrefix() {
    return leasePrefix;
}
/**
 * Configures the prefix used as part of the lease ID.
 *
 * @param leasePrefix the prefix to embed in lease IDs.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
    this.leasePrefix = leasePrefix;
    return this;
}
/**
 * Returns the maximum number of items returned per enumeration operation in the Azure Cosmos DB
 * service.
 *
 * @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
 */
public int getMaxItemCount() {
    return maxItemCount;
}
/**
 * Configures the maximum number of items returned per enumeration operation.
 *
 * @param maxItemCount the per-request item limit for enumeration operations.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
    this.maxItemCount = maxItemCount;
    return this;
}
/**
 * Returns the start request continuation token after which to start looking for changes.
 * <p>
 * This option applies when the lease store is not initialized; it is ignored when a lease item
 * exists with a non-null continuation token. When specified, both StartTime and StartFromBeginning
 * are ignored.
 *
 * @return the string representing a continuation token that will be used to get item feeds starting with.
 */
public String getStartContinuation() {
    return startContinuation;
}
/**
 * Sets the start request continuation token to start looking for changes after.
 * <p>
 * This option can be used when lease store is not initialized and it is ignored if a lease item exists and
 * has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
 *
 * @param startContinuation the start request continuation token to start looking for changes after.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
    this.startContinuation = startContinuation;
    return this;
}
/**
 * Returns the time (exclusive) after which to start looking for changes.
 * <p>
 * This option is honored only when:
 * (1) lease items are not initialized (it is ignored when lease items exist and hold a valid
 * continuation token); and
 * (2) the start continuation token option is not specified.
 * When specified, the "start from beginning" option is ignored.
 *
 * @return the time (exclusive) to start looking for changes after.
 */
public Instant getStartTime() {
    return startTime;
}
/**
 * Configures the time (exclusive, UTC) after which to start looking for changes.
 * <p>
 * This option is honored only when:
 * (1) lease items are not initialized (it is ignored when lease items exist and hold a valid
 * continuation token); and
 * (2) the start continuation token option is not specified.
 * When specified, the "start from beginning" option is ignored.
 *
 * @param startTime the time (exclusive) to start looking for changes after.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
    this.startTime = startTime;
    return this;
}
/**
 * Indicates whether the change feed in the Azure Cosmos DB service should be read from the
 * beginning (true) or from the current point (false). The default is to start from current (false).
 * <p>
 * This option is honored only when:
 * (1) lease items are not initialized (it is ignored when lease items exist and hold a valid
 * continuation token);
 * (2) the start continuation token option is not specified; and
 * (3) the start time option is not specified.
 *
 * @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
 */
public boolean isStartFromBeginning() {
    return startFromBeginning;
}
/**
 * Configures whether the change feed in the Azure Cosmos DB service should be read from the
 * beginning.
 * <p>
 * This option is honored only when:
 * (1) lease items are not initialized (it is ignored when lease items exist and hold a valid
 * continuation token);
 * (2) the start continuation token option is not specified; and
 * (3) the start time option is not specified.
 *
 * @param startFromBeginning Indicates to start from beginning if true
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
    this.startFromBeginning = startFromBeginning;
    return this;
}
/**
 * Returns the minimum partition count (parallel workers) for the current host.
 * <p>
 * Raising this value increases the number of partitions (parallel workers) for the host, overriding
 * the default equal distribution of leases between multiple hosts.
 *
 * @return the minimum scale count for the host.
 */
public int getMinScaleCount() {
    return minScaleCount;
}
/**
 * Configures the minimum partition count (parallel workers) for the current host.
 * <p>
 * Raising this value increases the number of partitions (parallel workers) for the host, overriding
 * the default equal distribution of leases between multiple hosts.
 *
 * @param minScaleCount the minimum partition count for the host.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
    this.minScaleCount = minScaleCount;
    return this;
}
/**
 * Returns the maximum number of partitions (parallel workers) the host can run.
 * <p>
 * Lowering this value limits the number of partitions (parallel workers) for the host, overriding
 * the default equal distribution of leases between multiple hosts. The default is "0", unlimited.
 *
 * @return the maximum number of partitions (parallel workers) the host can run.
 */
public int getMaxScaleCount() {
    return maxScaleCount;
}
/**
 * Configures the maximum number of partitions (parallel workers) the host can run.
 * <p>
 * Lowering this value limits the number of partitions (parallel workers) for the host, overriding
 * the default equal distribution of leases between multiple hosts. The default is "0", unlimited.
 *
 * @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
    this.maxScaleCount = maxScaleCount;
    return this;
}
/**
 * Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
 *
 * @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
 */
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
    return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
 * @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers for the
 * {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
} |
We could but that will probably be a different change while we can test that out. My concern with introducing our own scheduler for CFP is that we need to test it based on the configuration we choose. If it's just the name of it, so logging is easier to understand, that might be a low hanging fruit. | public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
} | this.scheduler = Schedulers.boundedElastic(); | public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets he interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval he interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it's start from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
* Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @param startFromBeginning Indicates to start from beginning if true
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
this.startFromBeginning = startFromBeginning;
return this;
}
/**
* Gets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @return the minimum scale count for the host.
*/
public int getMinScaleCount() {
return this.minScaleCount;
}
/**
* Sets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @param minScaleCount the minimum partition count for the host.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
this.minScaleCount = minScaleCount;
return this;
}
/**
* Gets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @return the maximum number of partitions (parallel workers) the host can run.
*/
public int getMaxScaleCount() {
return this.maxScaleCount;
}
/**
* Sets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
this.maxScaleCount = maxScaleCount;
return this;
}
/**
* Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers..
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
}
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets he interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval he interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it's start from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
* Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @param startFromBeginning Indicates to start from beginning if true
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
this.startFromBeginning = startFromBeginning;
return this;
}
/**
* Gets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @return the minimum scale count for the host.
*/
public int getMinScaleCount() {
return this.minScaleCount;
}
/**
* Sets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @param minScaleCount the minimum partition count for the host.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
// NOTE(review): no range validation here; negative values are stored as-is — TODO confirm intended.
this.minScaleCount = minScaleCount;
return this;
}
/**
* Gets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @return the maximum number of partitions (parallel workers) the host can run; 0 means unlimited.
*/
public int getMaxScaleCount() {
return this.maxScaleCount;
}
/**
* Sets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @param maxScaleCount the maximum number of partitions (parallel workers) the host can run; 0 means unlimited.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
this.maxScaleCount = maxScaleCount;
return this;
}
/**
* Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed
* processor related tasks.
*
* @param scheduler a non-null {@link Scheduler} that hosts a pool of ExecutorService-based workers used by this
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
* @throws IllegalArgumentException if {@code scheduler} is null.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
    if (scheduler == null) {
        // Keep IllegalArgumentException (not NullPointerException) so existing callers that
        // catch IAE still work, but give an actionable message instead of the bare field name.
        throw new IllegalArgumentException("scheduler cannot be null");
    }
    this.scheduler = scheduler;
    return this;
}
} |
actually no, because it will introduce more logs that we don't actually need. We know that this loop is executed, it's when leases are picked up either because they are expired or the equal load balancing is not achieved, that it's a problem. | private Mono<Void> run(CancellationToken cancellationToken) {
return Flux.just(this)
.flatMap(value -> this.leaseContainer.getAllLeases())
.collectList()
.flatMap(allLeases -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
if (leasesToTake.size() > 0) {
this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
}
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return Flux.fromIterable(leasesToTake)
.flatMap(lease -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return this.partitionController.addOrUpdateLease(lease);
})
.then(Mono.just(this)
.flatMap(value -> {
if (cancellationToken.isCancellationRequested()) {
return Mono.empty();
}
Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval);
return Mono.just(value)
.delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
.repeat( () -> {
Instant currentTime = Instant.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
);
})
.onErrorResume(throwable -> {
logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable);
return Mono.empty();
})
.repeat(() -> {
return !cancellationToken.isCancellationRequested();
})
.then()
.onErrorResume(throwable -> {
logger.info("Partition load balancer task stopped.");
return this.stop();
});
} | this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); | private Mono<Void> run(CancellationToken cancellationToken) {
return Flux.just(this)
.flatMap(value -> this.leaseContainer.getAllLeases())
.collectList()
.flatMap(allLeases -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
if (leasesToTake.size() > 0) {
this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
}
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return Flux.fromIterable(leasesToTake)
.limitRate(1)
.flatMap(lease -> {
if (cancellationToken.isCancellationRequested()) return Mono.empty();
return this.partitionController.addOrUpdateLease(lease);
})
.then(Mono.just(this)
.flatMap(value -> {
if (cancellationToken.isCancellationRequested()) {
return Mono.empty();
}
Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval);
return Mono.just(value)
.delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
.repeat( () -> {
Instant currentTime = Instant.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
);
})
.onErrorResume(throwable -> {
logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable);
return Mono.empty();
})
.repeat(() -> {
return !cancellationToken.isCancellationRequested();
})
.then()
.onErrorResume(throwable -> {
logger.info("Partition load balancer task stopped.");
return this.stop();
});
} | class PartitionLoadBalancerImpl implements PartitionLoadBalancer {
private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class);
// Collaborators injected via the constructor; all are required (see constructor null checks).
private final PartitionController partitionController;
private final LeaseContainer leaseContainer;
private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy;
// Pause between lease-acquisition passes of the balancing loop.
private final Duration leaseAcquireInterval;
private final Scheduler scheduler;
// Created in start() and cancelled in stop(); null until start() is first called. Guarded by 'lock'.
private CancellationTokenSource cancellationTokenSource;
// volatile: read lock-free by isRunning().
private volatile boolean started;
// Guards start()/stop() state transitions (started + cancellationTokenSource).
private final Object lock;
/**
* Creates a load balancer that periodically redistributes change feed leases across hosts.
*
* @param partitionController controller that takes ownership of acquired leases; must not be null.
* @param leaseContainer source of all known leases; must not be null.
* @param partitionLoadBalancingStrategy strategy that selects which leases this host should take; must not be null.
* @param leaseAcquireInterval pause between load-balancing passes.
* @param scheduler scheduler the balancing loop is run on; must not be null.
* @throws IllegalArgumentException if any required argument is null.
*/
public PartitionLoadBalancerImpl(
    PartitionController partitionController,
    LeaseContainer leaseContainer,
    PartitionLoadBalancingStrategy partitionLoadBalancingStrategy,
    Duration leaseAcquireInterval,
    Scheduler scheduler) {
    if (partitionController == null) {
        throw new IllegalArgumentException("partitionController");
    }
    if (leaseContainer == null) {
        throw new IllegalArgumentException("leaseContainer");
    }
    if (partitionLoadBalancingStrategy == null) {
        throw new IllegalArgumentException("partitionLoadBalancingStrategy");
    }
    if (scheduler == null) {
        // Bug fix: the message previously said "executorService", which is not the name of any
        // parameter of this constructor and misled callers diagnosing the failure.
        throw new IllegalArgumentException("scheduler");
    }
    this.partitionController = partitionController;
    this.leaseContainer = leaseContainer;
    this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy;
    this.leaseAcquireInterval = leaseAcquireInterval;
    this.scheduler = scheduler;
    this.started = false;
    this.lock = new Object();
}
@Override
public Mono<Void> start() {
// Synchronize so two concurrent start() calls cannot both pass the 'started' check,
// and so the token-source creation is atomic with the flag flip.
synchronized (lock) {
if (this.started) {
throw new IllegalStateException("Partition load balancer already started");
}
this.cancellationTokenSource = new CancellationTokenSource();
this.started = true;
}
return Mono.fromRunnable( () -> {
// Fire-and-forget: the balancing loop runs on the injected scheduler; the returned Mono
// completes once the loop has been scheduled, not when the loop finishes.
scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe());
});
}
/**
* Stops the balancing loop and shuts down the partition controller.
* <p>
* Now safe to call before {@link #start()}: previously a stop() issued before the first start()
* threw NullPointerException because the cancellation token source had not been created yet.
*
* @return a Mono that completes when the partition controller has shut down.
*/
@Override
public Mono<Void> stop() {
    synchronized (lock) {
        this.started = false;
        if (this.cancellationTokenSource != null) {
            this.cancellationTokenSource.cancel();
        }
    }
    return this.partitionController.shutdown();
}
@Override
public boolean isRunning() {
// Lock-free read; 'started' is volatile, so the latest start()/stop() transition is visible.
return this.started;
}
} | class PartitionLoadBalancerImpl implements PartitionLoadBalancer {
private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class);
// Collaborators injected via the constructor; all are required (see constructor null checks).
private final PartitionController partitionController;
private final LeaseContainer leaseContainer;
private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy;
// Pause between lease-acquisition passes of the balancing loop.
private final Duration leaseAcquireInterval;
private final Scheduler scheduler;
// Created in start() and cancelled in stop(); null until start() is first called. Guarded by 'lock'.
private CancellationTokenSource cancellationTokenSource;
// volatile: read lock-free by isRunning().
private volatile boolean started;
// Guards start()/stop() state transitions (started + cancellationTokenSource).
private final Object lock;
/**
* Creates a load balancer that periodically redistributes change feed leases across hosts.
*
* @param partitionController controller that takes ownership of acquired leases; must not be null.
* @param leaseContainer source of all known leases; must not be null.
* @param partitionLoadBalancingStrategy strategy that selects which leases this host should take; must not be null.
* @param leaseAcquireInterval pause between load-balancing passes.
* @param scheduler scheduler the balancing loop is run on; must not be null.
* @throws IllegalArgumentException if any required argument is null.
*/
public PartitionLoadBalancerImpl(
    PartitionController partitionController,
    LeaseContainer leaseContainer,
    PartitionLoadBalancingStrategy partitionLoadBalancingStrategy,
    Duration leaseAcquireInterval,
    Scheduler scheduler) {
    if (partitionController == null) {
        throw new IllegalArgumentException("partitionController");
    }
    if (leaseContainer == null) {
        throw new IllegalArgumentException("leaseContainer");
    }
    if (partitionLoadBalancingStrategy == null) {
        throw new IllegalArgumentException("partitionLoadBalancingStrategy");
    }
    if (scheduler == null) {
        // Bug fix: the message previously said "executorService", which is not the name of any
        // parameter of this constructor and misled callers diagnosing the failure.
        throw new IllegalArgumentException("scheduler");
    }
    this.partitionController = partitionController;
    this.leaseContainer = leaseContainer;
    this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy;
    this.leaseAcquireInterval = leaseAcquireInterval;
    this.scheduler = scheduler;
    this.started = false;
    this.lock = new Object();
}
@Override
public Mono<Void> start() {
// Synchronize so two concurrent start() calls cannot both pass the 'started' check,
// and so the token-source creation is atomic with the flag flip.
synchronized (lock) {
if (this.started) {
throw new IllegalStateException("Partition load balancer already started");
}
this.cancellationTokenSource = new CancellationTokenSource();
this.started = true;
}
return Mono.fromRunnable( () -> {
// Fire-and-forget: the balancing loop runs on the injected scheduler; the returned Mono
// completes once the loop has been scheduled, not when the loop finishes.
scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe());
});
}
/**
* Stops the balancing loop and shuts down the partition controller.
* <p>
* Now safe to call before {@link #start()}: previously a stop() issued before the first start()
* threw NullPointerException because the cancellation token source had not been created yet.
*
* @return a Mono that completes when the partition controller has shut down.
*/
@Override
public Mono<Void> stop() {
    synchronized (lock) {
        this.started = false;
        if (this.cancellationTokenSource != null) {
            this.cancellationTokenSource.cancel();
        }
    }
    return this.partitionController.shutdown();
}
@Override
public boolean isRunning() {
// Lock-free read; 'started' is volatile, so the latest start()/stop() transition is visible.
return this.started;
}
} |
good catch; I'll get it fixed right away | public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
} | this.scheduler = scheduler; | public ChangeFeedProcessorOptions setScheduler(Scheduler scheduler) {
if (scheduler == null) {
throw new IllegalArgumentException("scheduler");
}
this.scheduler = scheduler;
return this;
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options with library defaults:
* 100 items per page, start-from-current, the DEFAULT_* lease/poll intervals,
* unlimited max scale count (0), and a bounded-elastic scheduler.
*/
public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0; // 0 = unlimited parallel workers
this.scheduler = Schedulers.boundedElastic();
}
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval the interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it starts from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
* Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @param startFromBeginning Indicates to start from beginning if true
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
this.startFromBeginning = startFromBeginning;
return this;
}
/**
* Gets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @return the minimum scale count for the host.
*/
public int getMinScaleCount() {
return this.minScaleCount;
}
/**
* Sets the minimum partition count (parallel workers) for the current host.
* <p>
* This option can be used to increase the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts.
*
* @param minScaleCount the minimum partition count for the host.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
this.minScaleCount = minScaleCount;
return this;
}
/**
* Gets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @return the maximum number of partitions (parallel workers) the host can run.
*/
public int getMaxScaleCount() {
return this.maxScaleCount;
}
/**
* Sets the maximum number of partitions (parallel workers) the host can run.
* <p>
* This option can be used to limit the number of partitions (parallel workers) for the host and thus override
* the default equal distribution of leases between multiple hosts. Default setting is "0", unlimited.
*
* @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
this.maxScaleCount = maxScaleCount;
return this;
}
/**
* Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers used by this
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
} | class ChangeFeedProcessorOptions {
/**
* Default renew interval.
*/
public static final Duration DEFAULT_RENEW_INTERVAL = Duration.ofMillis(0).plusSeconds(17);
/**
* Default acquire interval.
*/
public static final Duration DEFAULT_ACQUIRE_INTERVAL = Duration.ofMillis(0).plusSeconds(13);
/**
* Default expiration interval.
*/
public static final Duration DEFAULT_EXPIRATION_INTERVAL = Duration.ofMillis(0).plusSeconds(60);
/**
* Default feed poll delay.
*/
public static final Duration DEFAULT_FEED_POLL_DELAY = Duration.ofMillis(0).plusSeconds(5);
private Duration leaseRenewInterval;
private Duration leaseAcquireInterval;
private Duration leaseExpirationInterval;
private Duration feedPollDelay;
private String leasePrefix;
private int maxItemCount;
private String startContinuation;
private Instant startTime;
private boolean startFromBeginning;
private int minScaleCount;
private int maxScaleCount;
private Scheduler scheduler;
/**
* Instantiates a new Change feed processor options.
*/
public ChangeFeedProcessorOptions() {
this.maxItemCount = 100;
this.startFromBeginning = false;
this.leaseRenewInterval = DEFAULT_RENEW_INTERVAL;
this.leaseAcquireInterval = DEFAULT_ACQUIRE_INTERVAL;
this.leaseExpirationInterval = DEFAULT_EXPIRATION_INTERVAL;
this.feedPollDelay = DEFAULT_FEED_POLL_DELAY;
this.maxScaleCount = 0;
this.scheduler = Schedulers.boundedElastic();
}
/**
* Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @return the renew interval for all leases for partitions.
*/
public Duration getLeaseRenewInterval() {
return this.leaseRenewInterval;
}
/**
* Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance.
*
* @param leaseRenewInterval the renew interval for all leases for partitions currently held by
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseRenewInterval(Duration leaseRenewInterval) {
this.leaseRenewInterval = leaseRenewInterval;
return this;
}
/**
* Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @return the interval to kick off a task to compute if partitions are distributed evenly among known host
* instances.
*/
public Duration getLeaseAcquireInterval() {
return this.leaseAcquireInterval;
}
/**
* Sets the interval to kick off a task to compute if partitions are distributed evenly among known host instances.
*
* @param leaseAcquireInterval the interval to kick off a task to compute if partitions are distributed evenly
* among known host instances.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseAcquireInterval(Duration leaseAcquireInterval) {
this.leaseAcquireInterval = leaseAcquireInterval;
return this;
}
/**
* Gets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @return the interval for which the lease is taken on a lease representing a partition.
*/
public Duration getLeaseExpirationInterval() {
return this.leaseExpirationInterval;
}
/**
* Sets the interval for which the lease is taken on a lease representing a partition.
*
* <p>
* If the lease is not renewed within this interval, it will cause it to expire and ownership of the partition will
* move to another {@link ChangeFeedProcessor} instance.
*
* @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeaseExpirationInterval(Duration leaseExpirationInterval) {
this.leaseExpirationInterval = leaseExpirationInterval;
return this;
}
/**
* Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @return the delay in between polling a partition for new changes on the feed.
*/
public Duration getFeedPollDelay() {
return this.feedPollDelay;
}
/**
* Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained.
*
* @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current
* changes are drained.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setFeedPollDelay(Duration feedPollDelay) {
this.feedPollDelay = feedPollDelay;
return this;
}
/**
* Gets a prefix to be used as part of the lease ID.
* <p>
* This can be used to support multiple instances of {@link ChangeFeedProcessor} instances pointing at the same
* feed while using the same auxiliary container.
*
* @return a prefix to be used as part of the lease ID.
*/
public String getLeasePrefix() {
return this.leasePrefix;
}
/**
* Sets a prefix to be used as part of the lease ID.
*
* @param leasePrefix a prefix to be used as part of the lease ID.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setLeasePrefix(String leasePrefix) {
this.leasePrefix = leasePrefix;
return this;
}
/**
* Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*
* @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service.
*/
public int getMaxItemCount() {
return this.maxItemCount;
}
/**
* Sets the maximum number of items to be returned in the enumeration operation.
*
* @param maxItemCount the maximum number of items to be returned in the enumeration operation.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setMaxItemCount(int maxItemCount) {
this.maxItemCount = maxItemCount;
return this;
}
/**
* Gets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public String getStartContinuation() {
return this.startContinuation;
}
/**
* Sets the start request continuation token to start looking for changes after.
* <p>
* This option can be used when lease store is not initialized and it is ignored if a lease item exists and
* has continuation token that is not null. If this is specified, both StartTime and StartFromBeginning are ignored.
*
* @param startContinuation the start request continuation token to start looking for changes after.
* @return the string representing a continuation token that will be used to get item feeds starting with.
*/
public ChangeFeedProcessorOptions setStartContinuation(String startContinuation) {
this.startContinuation = startContinuation;
return this;
}
/**
* Gets the time (exclusive) to start looking for changes after.
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @return the time (exclusive) to start looking for changes after.
*/
public Instant getStartTime() {
return this.startTime;
}
/**
* Sets the time (exclusive) to start looking for changes after (UTC time).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* If this option is specified, "start from beginning" option is ignored.
*
* @param startTime the time (exclusive) to start looking for changes after.
* @return the current ChangeFeedProcessorOptions instance.
*/
public ChangeFeedProcessorOptions setStartTime(Instant startTime) {
this.startTime = startTime;
return this;
}
/**
* Gets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning (true)
* or from current (false). By default it's start from current (false).
* <p>
* This option can be used when:
* (1) Lease items are not initialized; this setting will be ignored if the lease items exists and have a
* valid continuation token.
* (2) Start continuation token option is not specified.
* (3) Start time option is not specified.
*
* @return a value indicating whether change feed in the Azure Cosmos DB service should start from.
*/
public boolean isStartFromBeginning() {
return this.startFromBeginning;
}
/**
 * Sets a value indicating whether change feed in the Azure Cosmos DB service should start from beginning.
 * <p>
 * This option can be used when:
 * (1) Lease items are not initialized; this setting will be ignored if the lease items exist and have a
 * valid continuation token.
 * (2) Start continuation token option is not specified.
 * (3) Start time option is not specified.
 *
 * @param startFromBeginning Indicates to start from beginning if true.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setStartFromBeginning(boolean startFromBeginning) {
    this.startFromBeginning = startFromBeginning;
    return this;
}
/**
 * Returns the minimum number of partitions (parallel workers) this host should own.
 * <p>
 * Raising this value overrides the default equal distribution of leases across hosts.
 *
 * @return the minimum scale count for the host.
 */
public int getMinScaleCount() {
    return minScaleCount;
}
/**
 * Sets the minimum partition count (parallel workers) for the current host.
 * <p>
 * This option can be used to increase the number of partitions (parallel workers) for the host,
 * overriding the default equal distribution of leases between multiple hosts.
 *
 * @param minScaleCount the minimum partition count for the host.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setMinScaleCount(int minScaleCount) {
    this.minScaleCount = minScaleCount;
    return this;
}
/**
 * Returns the maximum number of partitions (parallel workers) this host may own.
 * <p>
 * The default of 0 means unlimited; a positive value caps the number of leases the host
 * acquires, overriding the default equal distribution of leases across hosts.
 *
 * @return the maximum number of partitions (parallel workers) the host can run.
 */
public int getMaxScaleCount() {
    return maxScaleCount;
}
/**
 * Sets the maximum number of partitions (parallel workers) the host can run.
 * <p>
 * This option can be used to limit the number of partitions (parallel workers) for the host,
 * overriding the default equal distribution of leases between multiple hosts. The default
 * setting is "0", i.e. unlimited.
 *
 * @param maxScaleCount the maximum number of partitions (parallel workers) the host can run.
 * @return the current ChangeFeedProcessorOptions instance.
 */
public ChangeFeedProcessorOptions setMaxScaleCount(int maxScaleCount) {
    this.maxScaleCount = maxScaleCount;
    return this;
}
/**
 * Gets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
 *
 * @return a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
 */
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Scheduler getScheduler() {
    return this.scheduler;
}
/**
* Sets the internal {@link Scheduler} that hosts a pool of ExecutorService-based workers for any change feed processor related tasks.
*
* @param scheduler a {@link Scheduler} that hosts a pool of ExecutorService-based workers.
* {@link ChangeFeedProcessor} instance.
* @return the current ChangeFeedProcessorOptions instance.
*/
@Beta(value = Beta.SinceVersion.V4_26_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
} |
When maxScaleCount is set to a non-zero value, it's OK to be greedy because there's a cap on how many partitions can be assigned to one CFP instance. I agree that the system might still go through a couple of rounds of load balancing, but far fewer than when no cap is set. The idea here is that as new instances are introduced into the system, they should start working immediately on all the available partitions rather than taking only one at a time. | public List<Lease> selectLeasesToTake(List<Lease> allLeases) {
// Bucket every lease into: all partitions, expired/unowned leases, and a per-owner lease count.
Map<String, Integer> workerToPartitionCount = new HashMap<>();
List<Lease> expiredLeases = new ArrayList<>();
Map<String, Lease> allPartitions = new HashMap<>();
this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount);
int partitionCount = allPartitions.size();
int workerCount = workerToPartitionCount.size();
if (partitionCount <= 0) {
    // Nothing to balance yet.
    return new ArrayList<Lease>();
}
// Fair share for this host, clamped by the min/max scale-count settings.
int target = this.calculateTargetPartitionCount(partitionCount, workerCount);
int myCount = workerToPartitionCount.get(this.hostName);
int partitionsNeededForMe = target - myCount;
if (expiredLeases.size() > 0) {
    // When no max cap is configured, throttle acquisition to one lease per round so the
    // cluster converges gradually instead of one host grabbing everything at once.
    if (this.maxPartitionCount == 0 && (partitionsNeededForMe <= 0 || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1))) {
        partitionsNeededForMe = 1;
    }
    for (Lease lease : expiredLeases) {
        this.logger.info("Found unused or expired lease {}; previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ",
            lease.getLeaseToken(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount);
    }
    // NOTE(review): subList always takes from the front of the list, so concurrent hosts may
    // contend for the same expired leases — TODO confirm this is acceptable upstream.
    return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size()));
}
// No free leases and already at/above target: take nothing.
if (partitionsNeededForMe <= 0)
    return new ArrayList<Lease>();
// Otherwise try to steal at most one lease from the busiest worker.
Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions);
List<Lease> stolenLeases = new ArrayList<>();
if (stolenLease != null) {
    stolenLeases.add(stolenLease);
}
return stolenLeases;
} | if (this.maxPartitionCount == 0 && (partitionsNeededForMe <= 0 || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1))) { | public List<Lease> selectLeasesToTake(List<Lease> allLeases) {
// Bucket every lease into: all partitions, expired/unowned leases, and a per-owner lease count.
Map<String, Integer> workerToPartitionCount = new HashMap<>();
List<Lease> expiredLeases = new ArrayList<>();
Map<String, Lease> allPartitions = new HashMap<>();
this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount);
int partitionCount = allPartitions.size();
int workerCount = workerToPartitionCount.size();
if (partitionCount <= 0) {
    return new ArrayList<Lease>();
}
// Fair share for this host, clamped by the min/max scale-count settings.
int target = this.calculateTargetPartitionCount(partitionCount, workerCount);
int myCount = workerToPartitionCount.get(this.hostName);
int partitionsNeededForMe = target - myCount;
if (expiredLeases.size() > 0) {
    // Throttle to one acquisition when there is no max cap and we are at target, or when
    // several workers compete; with a configured cap the host may take leases greedily.
    if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0) || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) {
        partitionsNeededForMe = 1;
    }
    if (partitionsNeededForMe == 1) {
        // Pick a random expired lease so concurrent hosts don't all contend for the first one.
        Random random = new Random();
        Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size()));
        this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ",
            expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount);
        return Collections.singletonList(expiredLease);
    } else {
        for (Lease lease : expiredLeases) {
            this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ",
                lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount);
        }
    }
    // Greedy path: take as many expired leases as still needed (bounded by availability).
    return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size()));
}
if (partitionsNeededForMe <= 0)
    return new ArrayList<Lease>();
// No free leases left: attempt to steal at most one from the most-loaded worker.
Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions);
List<Lease> stolenLeases = new ArrayList<>();
if (stolenLease != null) {
    stolenLeases.add(stolenLease);
}
return stolenLeases;
} | class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy {
private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class);
private final String hostName;
private final int minPartitionCount;
private final int maxPartitionCount;
private final Duration leaseExpirationInterval;
/**
 * Creates an equal-distribution load-balancing strategy for the given host.
 *
 * @param hostName the name of the current host; must not be null.
 * @param minPartitionCount the minimum number of partitions this host should own.
 * @param maxPartitionCount the maximum number of partitions this host may own (0 = unlimited).
 * @param leaseExpirationInterval how long after its recorded timestamp a lease is considered expired.
 * @throws IllegalArgumentException if {@code hostName} is null.
 */
public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) {
    if (hostName == null) {
        // Name the actual problem instead of just echoing the parameter name.
        throw new IllegalArgumentException("hostName cannot be null");
    }
    this.hostName = hostName;
    this.minPartitionCount = minPartitionCount;
    this.maxPartitionCount = maxPartitionCount;
    this.leaseExpirationInterval = leaseExpirationInterval;
}
@Override
/**
 * Picks one lease owned by the busiest worker, if that worker holds more than its fair share.
 * Returns null when no steal is warranted.
 */
private static Lease getLeaseToSteal(
    Map<String, Integer> workerToPartitionCount,
    int target,
    int partitionsNeededForMe,
    Map<String, Lease> allPartitions) {
    Map.Entry<String, Integer> busiestWorker = findWorkerWithMostPartitions(workerToPartitionCount);
    // Needing more than one lease lowers the steal threshold by one.
    int stealThreshold = target - (partitionsNeededForMe > 1 ? 1 : 0);
    if (busiestWorker.getValue() > stealThreshold) {
        for (Lease candidate : allPartitions.values()) {
            if (candidate.getOwner().equalsIgnoreCase(busiestWorker.getKey())) {
                return candidate;
            }
        }
    }
    return null;
}
/**
 * Returns the (worker, lease-count) entry with the highest count; ties go to the
 * later-visited entry, and a sentinel ("", 0) is returned for an empty map.
 */
private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) {
    Map.Entry<String, Integer> busiest = new ChangeFeedHelper.KeyValuePair<>("", 0);
    for (Map.Entry<String, Integer> worker : workerToPartitionCount.entrySet()) {
        if (worker.getValue() >= busiest.getValue()) {
            busiest = worker;
        }
    }
    return busiest;
}
/**
 * Computes this host's fair share of partitions: ceil(partitions / workers), then clamped
 * first to the configured maximum and then raised to the configured minimum (when set).
 */
private int calculateTargetPartitionCount(int partitionCount, int workerCount) {
    int target = partitionCount > workerCount
        ? (int) Math.ceil((double) partitionCount / workerCount)
        : 1;
    if (this.maxPartitionCount > 0) {
        target = Math.min(target, this.maxPartitionCount);
    }
    if (this.minPartitionCount > 0) {
        target = Math.max(target, this.minPartitionCount);
    }
    return target;
}
/**
 * Splits {@code allLeases} into the lookup structures used by the balancing pass:
 * every lease keyed by token in {@code allPartitions}, unowned/expired leases in
 * {@code expiredLeases}, and a lease count per owner in {@code workerToPartitionCount}
 * (the current host is always present, with 0 when it owns nothing).
 *
 * @param allLeases all known leases.
 * @param allPartitions out: lease token -> lease.
 * @param expiredLeases out: leases with no owner or past their expiration interval.
 * @param workerToPartitionCount out: owner -> number of owned, non-expired leases.
 */
private void categorizeLeases(
    List<Lease> allLeases,
    Map<String, Lease> allPartitions,
    List<Lease> expiredLeases,
    Map<String, Integer> workerToPartitionCount) {
    for (Lease lease : allLeases) {
        allPartitions.put(lease.getLeaseToken(), lease);
        if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) {
            expiredLeases.add(lease);
        } else {
            // merge() replaces the get/replace-or-put dance with a single map operation.
            workerToPartitionCount.merge(lease.getOwner(), 1, Integer::sum);
        }
    }
    // Make sure the current host shows up even when it owns nothing yet.
    workerToPartitionCount.putIfAbsent(this.hostName, 0);
}
/**
 * Determines whether a lease is reclaimable: it has no owner, no renewal timestamp, or its
 * timestamp plus the expiration interval lies in the past.
 *
 * @param lease the lease to inspect.
 * @return true if the lease is unowned or expired.
 */
private boolean isExpired(Lease lease) {
    if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) {
        return true;
    }
    // Sample "now" once so the logged value and the comparison cannot disagree.
    Instant now = Instant.now();
    Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval);
    // The first placeholder is the computed expiration time, not the raw lease timestamp.
    this.logger.debug("Lease expiration time: {}, current time: {}", leaseExpireTime, now);
    return leaseExpireTime.isBefore(now);
}
} | class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy {
private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class);
private final String hostName;
private final int minPartitionCount;
private final int maxPartitionCount;
private final Duration leaseExpirationInterval;
public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) {
if (hostName == null) {
throw new IllegalArgumentException("hostName");
}
this.hostName = hostName;
this.minPartitionCount = minPartitionCount;
this.maxPartitionCount = maxPartitionCount;
this.leaseExpirationInterval = leaseExpirationInterval;
}
@Override
private static Lease getLeaseToSteal(
Map<String, Integer> workerToPartitionCount,
int target,
int partitionsNeededForMe,
Map<String, Lease> allPartitions) {
Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount);
if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 1 : 0)) {
for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) {
if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) {
return entry.getValue();
}
}
}
return null;
}
private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) {
Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0);
for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) {
if (workerToStealFrom.getValue() <= entry.getValue()) {
workerToStealFrom = entry;
}
}
return workerToStealFrom;
}
private int calculateTargetPartitionCount(int partitionCount, int workerCount) {
int target = 1;
if (partitionCount > workerCount) {
target = (int)Math.ceil((double)partitionCount / workerCount);
}
if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) {
target = this.maxPartitionCount;
}
if (this.minPartitionCount > 0 && target < this.minPartitionCount) {
target = this.minPartitionCount;
}
return target;
}
private void categorizeLeases(
List<Lease> allLeases,
Map<String, Lease> allPartitions,
List<Lease> expiredLeases,
Map<String, Integer> workerToPartitionCount) {
for (Lease lease : allLeases) {
allPartitions.put(lease.getLeaseToken(), lease);
if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) {
expiredLeases.add(lease);
} else {
String assignedTo = lease.getOwner();
Integer count = workerToPartitionCount.get(assignedTo);
if (count != null) {
workerToPartitionCount.replace(assignedTo, count + 1);
} else {
workerToPartitionCount.put(assignedTo, 1);
}
}
}
if (!workerToPartitionCount.containsKey(this.hostName)) {
workerToPartitionCount.put(this.hostName, 0);
}
}
// Determines whether a lease is reclaimable: unowned, never renewed, or past its expiration window.
private boolean isExpired(Lease lease) {
    // Leases with no owner or no renewal timestamp are immediately reclaimable.
    if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) {
        return true;
    }
    Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval);
    // NOTE(review): the message says "lease timestamp" but logs the computed expiration time,
    // and Instant.now() is sampled twice — the logged and compared instants can differ slightly.
    this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now());
    return leaseExpireTime.isBefore(Instant.now());
}
} |
Does AbstractAzureServiceClientBuilderFactory have an inheritance relationship with the above AbstractAzureCredentialBuilderFactory? | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | if (bean instanceof AbstractAzureServiceClientBuilderFactory | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
// BeanFactoryAware callback: keep a handle to the owning bean factory for later lookups.
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
    this.beanFactory = beanFactory;
}
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} |
same as above. | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | this.defaultAzureCredential = defaultAzureCredential; | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | class DefaultEventHubsNamespaceProducerFactory implements EventHubsProducerFactory, DisposableBean {
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final PropertiesSupplier<String, ProducerProperties> propertiesSupplier;
private final Map<String, EventHubProducerAsyncClient> clients = new ConcurrentHashMap<>();
private final ProducerPropertiesParentMerger parentMerger = new ProducerPropertiesParentMerger();
private AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided namespace level configuration.
* @param namespaceProperties the namespace properties
*/
public DefaultEventHubsNamespaceProducerFactory(NamespaceProperties namespaceProperties) {
this(namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided namespace level configuration and producer {@link PropertiesSupplier}.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProducerProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProducerFactory(NamespaceProperties namespaceProperties,
PropertiesSupplier<String, ProducerProperties> supplier) {
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public EventHubProducerAsyncClient createProducer(String eventHub) {
return doCreateProducer(eventHub, this.propertiesSupplier.getProperties(eventHub));
}
/**
 * Returns the cached async producer client for {@code eventHub}, creating and caching it on
 * first use.
 *
 * @param eventHub the event hub (entity) name.
 * @param properties optional entity-level producer properties; may be null.
 */
private EventHubProducerAsyncClient doCreateProducer(String eventHub, @Nullable ProducerProperties properties) {
    // computeIfAbsent gives one client per entity, created at most once per map entry.
    return clients.computeIfAbsent(eventHub, entityName -> {
        // Namespace-level settings act as the parent; entity-level properties override them.
        ProducerProperties producerProperties = parentMerger.mergeParent(properties, this.namespaceProperties);
        producerProperties.setEventHubName(entityName);
        EventHubClientBuilderFactory factory = new EventHubClientBuilderFactory(producerProperties);
        factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
        // Propagate the credential hooks so the builder can fall back to shared defaults.
        factory.setTokenCredentialResolver(this.tokenCredentialResolver);
        factory.setDefaultTokenCredential(this.defaultAzureCredential);
        EventHubProducerAsyncClient producerClient = factory.build().buildAsyncProducerClient();
        // Notify registered listeners about the newly created client.
        this.listeners.forEach(l -> l.producerAdded(entityName, producerClient));
        return producerClient;
    });
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
@Override
public void destroy() {
    // Tear down every cached producer, letting listeners observe each removal first.
    for (Map.Entry<String, EventHubProducerAsyncClient> entry : clients.entrySet()) {
        for (Listener listener : listeners) {
            listener.producerRemoved(entry.getKey(), entry.getValue());
        }
        entry.getValue().close();
    }
    clients.clear();
    listeners.clear();
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} | class DefaultEventHubsNamespaceProducerFactory implements EventHubsProducerFactory, DisposableBean {
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final PropertiesSupplier<String, ProducerProperties> propertiesSupplier;
private final Map<String, EventHubProducerAsyncClient> clients = new ConcurrentHashMap<>();
private final ProducerPropertiesParentMerger parentMerger = new ProducerPropertiesParentMerger();
private AzureCredentialResolver<TokenCredential> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided namespace level configuration.
* @param namespaceProperties the namespace properties
*/
public DefaultEventHubsNamespaceProducerFactory(NamespaceProperties namespaceProperties) {
this(namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided namespace level configuration and producer {@link PropertiesSupplier}.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProducerProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProducerFactory(NamespaceProperties namespaceProperties,
PropertiesSupplier<String, ProducerProperties> supplier) {
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public EventHubProducerAsyncClient createProducer(String eventHub) {
return doCreateProducer(eventHub, this.propertiesSupplier.getProperties(eventHub));
}
private EventHubProducerAsyncClient doCreateProducer(String eventHub, @Nullable ProducerProperties properties) {
return clients.computeIfAbsent(eventHub, entityName -> {
ProducerProperties producerProperties = parentMerger.mergeParent(properties, this.namespaceProperties);
producerProperties.setEventHubName(entityName);
EventHubClientBuilderFactory factory = new EventHubClientBuilderFactory(producerProperties);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
EventHubProducerAsyncClient producerClient = factory.build().buildAsyncProducerClient();
this.listeners.forEach(l -> l.producerAdded(entityName, producerClient));
return producerClient;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
@Override
public void destroy() {
this.clients.forEach((name, client) -> {
this.listeners.forEach(l -> l.producerRemoved(name, client));
client.close();
});
this.clients.clear();
this.listeners.clear();
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<TokenCredential> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} |
Why can this dependency not be auto-wired as a normal bean definition? | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | (TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)); | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} |
As per the above field declaration, the default value is meant to be final. | public void setDefaultTokenCredential(TokenCredential defaultTokenCredential) {
if (defaultTokenCredential != null) {
this.defaultTokenCredential = defaultTokenCredential;
}
} | this.defaultTokenCredential = defaultTokenCredential; | public void setDefaultTokenCredential(TokenCredential defaultTokenCredential) {
if (defaultTokenCredential != null) {
this.defaultTokenCredential = defaultTokenCredential;
} else {
LOGGER.debug("Will ignore the 'null' default token credential.");
}
} | class {} | class {} |
What if the passed-in argument is null? Maybe add a null check? | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | this.clientFactoryCustomizers = clientFactoryCustomizers; | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | class EventHubsMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<EventHubsConsumerProperties>, ExtendedProducerProperties<EventHubsProducerProperties>, EventHubsChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, EventHubsConsumerProperties, EventHubsProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageChannelBinder.class);
private static final ExpressionParser EXPRESSION_PARSER = new SpelExpressionParser();
private NamespaceProperties namespaceProperties;
private EventHubsTemplate eventHubsTemplate;
private CheckpointStore checkpointStore;
private EventHubsProcessorContainer processorContainer;
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private EventHubsExtendedBindingProperties bindingProperties = new EventHubsExtendedBindingProperties();
private final Map<String, ExtendedProducerProperties<EventHubsProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<EventHubsConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link EventHubsMessageChannelBinder} with the specified headers to embed and {@link EventHubsChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public EventHubsMessageChannelBinder(String[] headersToEmbed, EventHubsChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
    ProducerDestination destination,
    ExtendedProducerProperties<EventHubsProducerProperties> producerProperties,
    MessageChannel errorChannel) {
    // Remember the extended properties so the lazy producer-properties supplier can find them.
    extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
    // Side effect: getEventHubTemplate() also lazily initializes this.eventHubsTemplate.
    Assert.notNull(getEventHubTemplate(), "eventHubsTemplate can't be null when create a producer");
    DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), this.eventHubsTemplate);
    handler.setBeanFactory(getBeanFactory());
    handler.setSync(producerProperties.getExtension().isSync());
    handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
    handler.setSendFailureChannel(errorChannel);
    // Report send outcomes to the instrumentation manager for health reporting.
    String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
    handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
    if (producerProperties.isPartitioned()) {
        // Binder-managed partitioning: route by the partition header computed upstream.
        handler.setPartitionIdExpression(
            EXPRESSION_PARSER.parseExpression("headers['" + BinderHeaders.PARTITION_HEADER + "']"));
    } else {
        // Otherwise derive a partition key from the payload's hashCode.
        handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
    }
    return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
    ExtendedConsumerProperties<EventHubsConsumerProperties> properties) {
    // NOTE(review): the map is keyed with the caller-supplied group BEFORE the anonymous-group
    // fallback below, so anonymous bindings are registered under the empty group — confirm intended.
    extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
    // Side effect: getProcessorContainer() also lazily initializes this.processorContainer.
    Assert.notNull(getProcessorContainer(), "eventProcessorsContainer can't be null when create a consumer");
    boolean anonymous = !StringUtils.hasText(group);
    if (anonymous) {
        // Generate a unique consumer group for bindings that did not specify one.
        group = "anonymous." + UUID.randomUUID();
    }
    EventHubsInboundChannelAdapter inboundAdapter;
    if (properties.isBatchMode()) {
        inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
            destination.getName(), group, ListenerMode.BATCH, properties.getExtension().getCheckpoint());
    } else {
        inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
            destination.getName(), group, properties.getExtension().getCheckpoint());
    }
    inboundAdapter.setBeanFactory(getBeanFactory());
    // Health instrumentation id takes the form "<destination>/<group>".
    String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + group);
    inboundAdapter.setInstrumentationManager(instrumentationManager);
    inboundAdapter.setInstrumentationId(instrumentationId);
    // Route listener failures into the binder-managed error channel.
    ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, group, properties);
    inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
    return inboundAdapter;
}
@Override
public EventHubsConsumerProperties getExtendedConsumerProperties(String destination) {
return this.bindingProperties.getExtendedConsumerProperties(destination);
}
@Override
public EventHubsProducerProperties getExtendedProducerProperties(String destination) {
return this.bindingProperties.getExtendedProducerProperties(destination);
}
@Override
public String getDefaultsPrefix() {
return this.bindingProperties.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return this.bindingProperties.getExtendedPropertiesEntryClass();
}
/**
* Set binding properties.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(EventHubsExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
/**
 * Supplies entity-level producer properties captured during binding creation, or null (with a
 * debug log) when no binding registered properties for the given event hub.
 */
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
    return key -> {
        // Single lookup instead of containsKey + get: avoids a double probe and a
        // check-then-act window on the concurrent map.
        ExtendedProducerProperties<EventHubsProducerProperties> extendedProperties =
            this.extendedProducerPropertiesMap.get(key);
        if (extendedProperties == null) {
            LOGGER.debug("Can't find extended properties for {}", key);
            return null;
        }
        EventHubsProducerProperties producerProperties = extendedProperties.getExtension();
        producerProperties.setEventHubName(key);
        return producerProperties;
    };
}
/**
 * Supplies entity-level processor properties captured during binding creation, or null (with a
 * debug log) when no binding registered properties for the given destination/group pair.
 */
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
    return key -> {
        // Single lookup instead of containsKey + get: avoids a double probe and a
        // check-then-act window on the concurrent map.
        ExtendedConsumerProperties<EventHubsConsumerProperties> extendedProperties =
            this.extendedConsumerPropertiesMap.get(key);
        if (extendedProperties == null) {
            LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
            return null;
        }
        EventHubsConsumerProperties consumerProperties = extendedProperties.getExtension();
        consumerProperties.setEventHubName(key.getDestination());
        consumerProperties.setConsumerGroup(key.getGroup());
        return consumerProperties;
    };
}
// Lazily builds the EventHubsTemplate (and its producer factory) on first use.
// NOTE(review): this lazy init is not synchronized — confirm all callers run on a single
// binder thread, otherwise two factories/templates could be created.
private EventHubsTemplate getEventHubTemplate() {
    if (this.eventHubsTemplate == null) {
        DefaultEventHubsNamespaceProducerFactory factory = new DefaultEventHubsNamespaceProducerFactory(
            this.namespaceProperties, getProducerPropertiesSupplier());
        // Let user-supplied customizers tweak the factory before it is wrapped.
        clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
        // Register each new producer as "up" with the health instrumentation.
        factory.addListener((name, producerAsyncClient) -> {
            DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
            instrumentation.markUp();
            instrumentationManager.addHealthInstrumentation(instrumentation);
        });
        this.eventHubsTemplate = new EventHubsTemplate(factory);
    }
    return this.eventHubsTemplate;
}
// Lazily creates the shared processor container on first use.
// NOTE(review): same unsynchronized lazy init as getEventHubTemplate() -- confirm
// binder initialization is single-threaded.
private EventHubsProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultEventHubsNamespaceProcessorFactory factory = new DefaultEventHubsNamespaceProcessorFactory(
this.checkpointStore, this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
// Each (event hub, consumer group) pair gets its own health instrumentation entry.
factory.addListener((name, consumerGroup, processorClient) -> {
String instrumentationName = name + "/" + consumerGroup;
Instrumentation instrumentation = new EventHubsProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new EventHubsProcessorContainer(factory);
}
return this.processorContainer;
}
/**
* Set namespace properties, used as the namespace-level configuration when the
* producer and processor factories are lazily created.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set the checkpoint store passed to the processor factory when the processor
* container is lazily created.
*
* @param checkpointStore the checkpoint store
*/
public void setCheckpointStore(CheckpointStore checkpointStore) {
this.checkpointStore = checkpointStore;
}
/**
* Get the instrumentation manager. Instrumentations registered by the producer
* and processor factory listeners are exposed through it.
*
* @return instrumentationManager the instrumentation manager
* @see InstrumentationManager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} | class EventHubsMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<EventHubsConsumerProperties>, ExtendedProducerProperties<EventHubsProducerProperties>, EventHubsChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, EventHubsConsumerProperties, EventHubsProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageChannelBinder.class);
private static final ExpressionParser EXPRESSION_PARSER = new SpelExpressionParser();
private NamespaceProperties namespaceProperties;
// Lazily created in getEventHubTemplate().
private EventHubsTemplate eventHubsTemplate;
private CheckpointStore checkpointStore;
// Lazily created in getProcessorContainer().
private EventHubsProcessorContainer processorContainer;
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private EventHubsExtendedBindingProperties bindingProperties = new EventHubsExtendedBindingProperties();
// Registered per destination when producer bindings are created; read by the suppliers.
private final Map<String, ExtendedProducerProperties<EventHubsProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
// Registered per (destination, group) when consumer bindings are created.
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<EventHubsConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link EventHubsMessageChannelBinder} with the specified headers to embed and {@link EventHubsChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public EventHubsMessageChannelBinder(String[] headersToEmbed, EventHubsChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
        ProducerDestination destination,
        ExtendedProducerProperties<EventHubsProducerProperties> producerProperties,
        MessageChannel errorChannel) {
    // Register properties first so getProducerPropertiesSupplier() can resolve them.
    extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
    // Capture the lazily-created template directly instead of relying on the
    // side effect of getEventHubTemplate() populating this.eventHubsTemplate.
    EventHubsTemplate template = getEventHubTemplate();
    Assert.notNull(template, "eventHubsTemplate can't be null when create a producer");
    DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), template);
    handler.setBeanFactory(getBeanFactory());
    handler.setSync(producerProperties.getExtension().isSync());
    handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
    handler.setSendFailureChannel(errorChannel);
    String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
    handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
    if (producerProperties.isPartitioned()) {
        // Partitioned binding: route by the partition header set upstream.
        handler.setPartitionIdExpression(
            EXPRESSION_PARSER.parseExpression("headers['" + BinderHeaders.PARTITION_HEADER + "']"));
    } else {
        // Otherwise derive a partition key from the payload hash.
        handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
    }
    return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<EventHubsConsumerProperties> properties) {
// Register properties so getProcessorPropertiesSupplier() can resolve them.
// NOTE(review): keyed with the original (possibly empty) group, while the
// adapter below may use a generated anonymous group -- confirm the processor
// factory looks up with the same key.
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
Assert.notNull(getProcessorContainer(), "eventProcessorsContainer can't be null when create a consumer");
// Bindings without a group get a generated, effectively unique group name.
boolean anonymous = !StringUtils.hasText(group);
if (anonymous) {
group = "anonymous." + UUID.randomUUID();
}
EventHubsInboundChannelAdapter inboundAdapter;
if (properties.isBatchMode()) {
inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
destination.getName(), group, ListenerMode.BATCH, properties.getExtension().getCheckpoint());
} else {
inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
destination.getName(), group, properties.getExtension().getCheckpoint());
}
inboundAdapter.setBeanFactory(getBeanFactory());
// Instrumentation id combines destination and (possibly generated) group.
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + group);
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
// Wire binder-standard error channel infrastructure into the adapter.
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, group, properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
return inboundAdapter;
}
@Override
public EventHubsConsumerProperties getExtendedConsumerProperties(String destination) {
    // Delegate to the extended binding properties.
    EventHubsExtendedBindingProperties props = this.bindingProperties;
    return props.getExtendedConsumerProperties(destination);
}
@Override
public EventHubsProducerProperties getExtendedProducerProperties(String destination) {
    // Delegate to the extended binding properties.
    EventHubsExtendedBindingProperties props = this.bindingProperties;
    return props.getExtendedProducerProperties(destination);
}
@Override
public String getDefaultsPrefix() {
    // Delegate to the extended binding properties.
    EventHubsExtendedBindingProperties props = this.bindingProperties;
    return props.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
    // Delegate to the extended binding properties.
    EventHubsExtendedBindingProperties props = this.bindingProperties;
    return props.getExtendedPropertiesEntryClass();
}
/**
* Set the extended binding properties used to resolve per-binding
* producer and consumer configuration.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(EventHubsExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
/**
 * Build a supplier that resolves producer properties by event hub name.
 * Returns {@code null} when no binding has registered properties for the key,
 * matching the original contract.
 */
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
    return key -> {
        // Single map lookup instead of containsKey()+get(): avoids a second
        // hash probe and the check-then-act window on the ConcurrentHashMap.
        ExtendedProducerProperties<EventHubsProducerProperties> extended =
            this.extendedProducerPropertiesMap.get(key);
        if (extended == null) {
            LOGGER.debug("Can't find extended properties for {}", key);
            return null;
        }
        EventHubsProducerProperties producerProperties = extended.getExtension();
        producerProperties.setEventHubName(key);
        return producerProperties;
    };
}
/**
 * Build a supplier that resolves processor properties by (destination, group).
 * Returns {@code null} when no binding has registered properties for the key,
 * matching the original contract.
 */
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
    return key -> {
        // Single map lookup instead of containsKey()+get(): avoids a second
        // hash probe and the check-then-act window on the ConcurrentHashMap.
        ExtendedConsumerProperties<EventHubsConsumerProperties> extended =
            this.extendedConsumerPropertiesMap.get(key);
        if (extended == null) {
            LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
            return null;
        }
        EventHubsConsumerProperties consumerProperties = extended.getExtension();
        consumerProperties.setEventHubName(key.getDestination());
        consumerProperties.setConsumerGroup(key.getGroup());
        return consumerProperties;
    };
}
// Lazily creates the shared EventHubsTemplate on first use.
// NOTE(review): no synchronization -- assumes binder initialization is
// single-threaded; concurrent first calls could build two factories. Confirm.
private EventHubsTemplate getEventHubTemplate() {
if (this.eventHubsTemplate == null) {
DefaultEventHubsNamespaceProducerFactory factory = new DefaultEventHubsNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
// Apply user-supplied customizations before the factory is first used.
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
// Register every created producer client as "up" with the health instrumentation.
factory.addListener((name, producerAsyncClient) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.eventHubsTemplate = new EventHubsTemplate(factory);
}
return this.eventHubsTemplate;
}
// Lazily creates the shared processor container on first use.
// NOTE(review): same unsynchronized lazy init as getEventHubTemplate() -- confirm
// binder initialization is single-threaded.
private EventHubsProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultEventHubsNamespaceProcessorFactory factory = new DefaultEventHubsNamespaceProcessorFactory(
this.checkpointStore, this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
// Each (event hub, consumer group) pair gets its own health instrumentation entry.
factory.addListener((name, consumerGroup, processorClient) -> {
String instrumentationName = name + "/" + consumerGroup;
Instrumentation instrumentation = new EventHubsProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new EventHubsProcessorContainer(factory);
}
return this.processorContainer;
}
/**
* Set namespace properties, used as the namespace-level configuration when the
* producer and processor factories are lazily created.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set the checkpoint store passed to the processor factory when the processor
* container is lazily created.
*
* @param checkpointStore the checkpoint store
*/
public void setCheckpointStore(CheckpointStore checkpointStore) {
this.checkpointStore = checkpointStore;
}
/**
* Get the instrumentation manager. Instrumentations registered by the producer
* and processor factory listeners are exposed through it.
*
* @return instrumentationManager the instrumentation manager
* @see InstrumentationManager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} |
as above. null check? | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | this.clientFactoryCustomizers = clientFactoryCustomizers; | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | class ServiceBusMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<ServiceBusConsumerProperties>,
ExtendedProducerProperties<ServiceBusProducerProperties>,
ServiceBusChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, ServiceBusConsumerProperties, ServiceBusProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class);
private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy();
// Dead-letter reason used by the error handler below.
private static final String EXCEPTION_MESSAGE = "exception-message";
private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties();
private NamespaceProperties namespaceProperties;
// Lazily created in getServiceBusTemplate().
private ServiceBusTemplate serviceBusTemplate;
// Lazily created in getProcessorContainer().
private ServiceBusProcessorContainer processorContainer;
private ServiceBusMessageConverter messageConverter = new ServiceBusMessageConverter();
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
// Registered per destination when producer bindings are created; read by the suppliers.
private final Map<String, ExtendedProducerProperties<ServiceBusProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
// Registered per (destination, group) when consumer bindings are created.
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<ServiceBusConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link ServiceBusMessageChannelBinder} with the specified headersToEmbed and {@link ServiceBusChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public ServiceBusMessageChannelBinder(String[] headersToEmbed, ServiceBusChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
        ProducerDestination destination,
        ExtendedProducerProperties<ServiceBusProducerProperties> producerProperties,
        MessageChannel errorChannel) {
    // Capture the lazily-created template directly instead of relying on the
    // side effect of getServiceBusTemplate() populating this.serviceBusTemplate.
    ServiceBusTemplate template = getServiceBusTemplate();
    Assert.notNull(template, "ServiceBusTemplate can't be null when create a producer");
    // Register properties so getProducerPropertiesSupplier() can resolve them.
    extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
    DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), template);
    handler.setBeanFactory(getBeanFactory());
    handler.setSync(producerProperties.getExtension().isSync());
    handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
    handler.setSendFailureChannel(errorChannel);
    String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
    handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
    if (producerProperties.isPartitioned()) {
        // Partitioned binding: derive the key from the partition header set upstream.
        handler.setPartitionKeyExpressionString(
            "'partitionKey-' + headers['" + BinderHeaders.PARTITION_HEADER + "']");
    } else {
        // Otherwise derive a partition key from the payload hash.
        handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
    }
    return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
// Register properties so getProcessorPropertiesSupplier() can resolve them.
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
final ServiceBusInboundChannelAdapter inboundAdapter;
inboundAdapter = new ServiceBusInboundChannelAdapter(getProcessorContainer(), destination.getName(), group,
buildCheckpointConfig(properties));
inboundAdapter.setBeanFactory(getBeanFactory());
// getGroup(..) normalizes a null group to "" for instrumentation/error channel names.
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + getGroup(group));
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
// Wire binder-standard error channel infrastructure into the adapter.
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, getGroup(group), properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
inboundAdapter.setMessageConverter(messageConverter);
return inboundAdapter;
}
@Override
protected MessageHandler getErrorMessageHandler(ConsumerDestination destination,
String group,
final ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
// Settles the failed Service Bus message: dead-letter or abandon, driven by
// the binding's requeueRejected flag.
return message -> {
Assert.state(message instanceof ErrorMessage, "Expected an ErrorMessage, not a "
+ message.getClass().toString() + " for: " + message);
ErrorMessage errorMessage = (ErrorMessage) message;
Message<?> amqpMessage = errorMessage.getOriginalMessage();
if (amqpMessage == null) {
logger.error("No raw message header in " + message);
} else {
Throwable cause = (Throwable) message.getPayload();
// NOTE(review): requeueRejected==true dead-letters while false abandons
// (which makes the message redeliverable) -- verify this mapping is not
// inverted relative to the property's documented meaning.
if (properties.getExtension().isRequeueRejected()) {
deadLetter(destination.getName(), amqpMessage, EXCEPTION_MESSAGE,
cause.getCause() != null ? cause.getCause().getMessage() : cause.getMessage());
} else {
abandon(destination.getName(), amqpMessage);
}
}
};
}
/**
* Moves a message to the dead-letter sub-queue with dead-letter reason.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
* @param deadLetterReason the dead-letter reason
* @param deadLetterErrorDescription the dead-letter error description
*/
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
// The settlement context travels in the message headers; destination (and the
// reason/description parameters) are not forwarded to deadLetter() here.
// NOTE(review): silently a no-op when the header is absent -- confirm intended.
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
/**
* Abandons the message in this context.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
*/
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
// The settlement context travels in the message headers; destination is only
// validated, not used. Silently a no-op when the header is absent.
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
public ServiceBusConsumerProperties getExtendedConsumerProperties(String channelName) {
    // Delegate to the extended binding properties.
    ServiceBusExtendedBindingProperties props = this.bindingProperties;
    return props.getExtendedConsumerProperties(channelName);
}
@Override
public ServiceBusProducerProperties getExtendedProducerProperties(String channelName) {
    // Delegate to the extended binding properties.
    ServiceBusExtendedBindingProperties props = this.bindingProperties;
    return props.getExtendedProducerProperties(channelName);
}
@Override
public String getDefaultsPrefix() {
    // Delegate to the extended binding properties.
    ServiceBusExtendedBindingProperties props = this.bindingProperties;
    return props.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
    // Delegate to the extended binding properties.
    ServiceBusExtendedBindingProperties props = this.bindingProperties;
    return props.getExtendedPropertiesEntryClass();
}
@Override
protected ErrorMessageStrategy getErrorMessageStrategy() {
    // Shared, stateless default strategy.
    return ServiceBusMessageChannelBinder.DEFAULT_ERROR_MESSAGE_STRATEGY;
}
/**
* Set the extended binding properties used to resolve per-binding
* producer and consumer configuration.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(ServiceBusExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
// Translate the binding's checkpoint mode into the container's checkpoint config.
private CheckpointConfig buildCheckpointConfig(
        ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
    ServiceBusConsumerProperties extension = properties.getExtension();
    return new CheckpointConfig(extension.getCheckpointMode());
}
// Lazily creates the shared ServiceBusTemplate on first use.
// NOTE(review): no synchronization -- assumes binder initialization is
// single-threaded; concurrent first calls could build two factories. Confirm.
private ServiceBusTemplate getServiceBusTemplate() {
if (this.serviceBusTemplate == null) {
DefaultServiceBusNamespaceProducerFactory factory = new DefaultServiceBusNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
// Apply user-supplied customizations before the factory is first used.
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
// Register every created producer client as "up" with the health instrumentation.
factory.addListener((name, client) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.serviceBusTemplate = new ServiceBusTemplate(factory);
}
return this.serviceBusTemplate;
}
// Lazily creates the shared processor container on first use.
// NOTE(review): same unsynchronized lazy init as getServiceBusTemplate() -- confirm
// binder initialization is single-threaded.
private ServiceBusProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultServiceBusNamespaceProcessorFactory factory = new DefaultServiceBusNamespaceProcessorFactory(
this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
// Each (entity, subscription) pair gets its own health instrumentation entry.
factory.addListener((name, subscription, client) -> {
String instrumentationName = name + "/" + getGroup(subscription);
Instrumentation instrumentation = new ServiceBusProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new ServiceBusProcessorContainer(factory);
}
return this.processorContainer;
}
/**
 * Build a supplier that resolves producer properties by entity name.
 * Returns {@code null} when no binding has registered properties for the key,
 * matching the original contract.
 */
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
    return key -> {
        // Single map lookup instead of containsKey()+get(): avoids a second
        // hash probe and the check-then-act window on the ConcurrentHashMap.
        ExtendedProducerProperties<ServiceBusProducerProperties> extended =
            this.extendedProducerPropertiesMap.get(key);
        if (extended == null) {
            LOGGER.debug("Can't find extended properties for {}", key);
            return null;
        }
        ServiceBusProducerProperties producerProperties = extended.getExtension();
        producerProperties.setEntityName(key);
        return producerProperties;
    };
}
/**
 * Build a supplier that resolves processor properties by (destination, group).
 * Returns {@code null} when no binding has registered properties for the key,
 * matching the original contract.
 */
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
    return key -> {
        // Single map lookup instead of containsKey()+get(): avoids a second
        // hash probe and the check-then-act window on the ConcurrentHashMap.
        ExtendedConsumerProperties<ServiceBusConsumerProperties> extended =
            this.extendedConsumerPropertiesMap.get(key);
        if (extended == null) {
            LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
            return null;
        }
        ServiceBusConsumerProperties consumerProperties = extended.getExtension();
        consumerProperties.setEntityName(key.getDestination());
        consumerProperties.setSubscriptionName(key.getGroup());
        return consumerProperties;
    };
}
/**
* Set namespace properties, used as the namespace-level configuration when the
* producer and processor factories are lazily created.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set the message converter applied by consumer endpoints created by this binder.
*
* @param messageConverter the message converter
*/
public void setMessageConverter(ServiceBusMessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
/**
* Get the instrumentation manager. Instrumentations registered by the producer
* and processor factory listeners are exposed through it.
*
* @return instrumentationManager the instrumentation manager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
// Normalize a possibly-null subscription/group name to an empty string.
private String getGroup(String group) {
    return (group == null) ? "" : group;
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} | class ServiceBusMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<ServiceBusConsumerProperties>,
ExtendedProducerProperties<ServiceBusProducerProperties>,
ServiceBusChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, ServiceBusConsumerProperties, ServiceBusProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class);
private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy();
private static final String EXCEPTION_MESSAGE = "exception-message";
private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties();
private NamespaceProperties namespaceProperties;
private ServiceBusTemplate serviceBusTemplate;
private ServiceBusProcessorContainer processorContainer;
private ServiceBusMessageConverter messageConverter = new ServiceBusMessageConverter();
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private final Map<String, ExtendedProducerProperties<ServiceBusProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<ServiceBusConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link ServiceBusMessageChannelBinder} with the specified headersToEmbed and {@link ServiceBusChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public ServiceBusMessageChannelBinder(String[] headersToEmbed, ServiceBusChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
        ProducerDestination destination,
        ExtendedProducerProperties<ServiceBusProducerProperties> producerProperties,
        MessageChannel errorChannel) {
    // Capture the lazily-created template directly instead of relying on the
    // side effect of getServiceBusTemplate() populating this.serviceBusTemplate.
    ServiceBusTemplate template = getServiceBusTemplate();
    Assert.notNull(template, "ServiceBusTemplate can't be null when create a producer");
    // Register properties so getProducerPropertiesSupplier() can resolve them.
    extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
    DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), template);
    handler.setBeanFactory(getBeanFactory());
    handler.setSync(producerProperties.getExtension().isSync());
    handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
    handler.setSendFailureChannel(errorChannel);
    String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
    handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
    if (producerProperties.isPartitioned()) {
        // Partitioned binding: derive the key from the partition header set upstream.
        handler.setPartitionKeyExpressionString(
            "'partitionKey-' + headers['" + BinderHeaders.PARTITION_HEADER + "']");
    } else {
        // Otherwise derive a partition key from the payload hash.
        handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
    }
    return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
final ServiceBusInboundChannelAdapter inboundAdapter;
inboundAdapter = new ServiceBusInboundChannelAdapter(getProcessorContainer(), destination.getName(), group,
buildCheckpointConfig(properties));
inboundAdapter.setBeanFactory(getBeanFactory());
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + getGroup(group));
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, getGroup(group), properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
inboundAdapter.setMessageConverter(messageConverter);
return inboundAdapter;
}
@Override
protected MessageHandler getErrorMessageHandler(ConsumerDestination destination,
String group,
final ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
return message -> {
Assert.state(message instanceof ErrorMessage, "Expected an ErrorMessage, not a "
+ message.getClass().toString() + " for: " + message);
ErrorMessage errorMessage = (ErrorMessage) message;
Message<?> amqpMessage = errorMessage.getOriginalMessage();
if (amqpMessage == null) {
logger.error("No raw message header in " + message);
} else {
Throwable cause = (Throwable) message.getPayload();
if (properties.getExtension().isRequeueRejected()) {
deadLetter(destination.getName(), amqpMessage, EXCEPTION_MESSAGE,
cause.getCause() != null ? cause.getCause().getMessage() : cause.getMessage());
} else {
abandon(destination.getName(), amqpMessage);
}
}
};
}
/**
* Moves a message to the dead-letter sub-queue with dead-letter reason.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
* @param deadLetterReason the dead-letter reason
* @param deadLetterErrorDescription the dead-letter error description
*/
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
/**
* Abandons the message in this context.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
*/
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
public ServiceBusConsumerProperties getExtendedConsumerProperties(String channelName) {
return this.bindingProperties.getExtendedConsumerProperties(channelName);
}
@Override
public ServiceBusProducerProperties getExtendedProducerProperties(String channelName) {
return this.bindingProperties.getExtendedProducerProperties(channelName);
}
@Override
public String getDefaultsPrefix() {
return this.bindingProperties.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return this.bindingProperties.getExtendedPropertiesEntryClass();
}
@Override
protected ErrorMessageStrategy getErrorMessageStrategy() {
return DEFAULT_ERROR_MESSAGE_STRATEGY;
}
/**
* Set binding properties.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(ServiceBusExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
private CheckpointConfig buildCheckpointConfig(
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
return new CheckpointConfig(properties.getExtension().getCheckpointMode());
}
private ServiceBusTemplate getServiceBusTemplate() {
if (this.serviceBusTemplate == null) {
DefaultServiceBusNamespaceProducerFactory factory = new DefaultServiceBusNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, client) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.serviceBusTemplate = new ServiceBusTemplate(factory);
}
return this.serviceBusTemplate;
}
private ServiceBusProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultServiceBusNamespaceProcessorFactory factory = new DefaultServiceBusNamespaceProcessorFactory(
this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, subscription, client) -> {
String instrumentationName = name + "/" + getGroup(subscription);
Instrumentation instrumentation = new ServiceBusProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new ServiceBusProcessorContainer(factory);
}
return this.processorContainer;
}
/**
 * Build a supplier that resolves producer properties by entity name.
 * Returns {@code null} when no binding has registered properties for the key,
 * matching the original contract.
 */
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
    return key -> {
        // Single map lookup instead of containsKey()+get(): avoids a second
        // hash probe and the check-then-act window on the ConcurrentHashMap.
        ExtendedProducerProperties<ServiceBusProducerProperties> extended =
            this.extendedProducerPropertiesMap.get(key);
        if (extended == null) {
            LOGGER.debug("Can't find extended properties for {}", key);
            return null;
        }
        ServiceBusProducerProperties producerProperties = extended.getExtension();
        producerProperties.setEntityName(key);
        return producerProperties;
    };
}
/**
 * Build a supplier that resolves processor properties by (destination, group).
 * Returns {@code null} when no binding has registered properties for the key,
 * matching the original contract.
 */
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
    return key -> {
        // Single map lookup instead of containsKey()+get(): avoids a second
        // hash probe and the check-then-act window on the ConcurrentHashMap.
        ExtendedConsumerProperties<ServiceBusConsumerProperties> extended =
            this.extendedConsumerPropertiesMap.get(key);
        if (extended == null) {
            LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
            return null;
        }
        ServiceBusConsumerProperties consumerProperties = extended.getExtension();
        consumerProperties.setEntityName(key.getDestination());
        consumerProperties.setSubscriptionName(key.getGroup());
        return consumerProperties;
    };
}
/**
* Set namespace properties.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set message converter.
*
* @param messageConverter the message converter
*/
public void setMessageConverter(ServiceBusMessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
/**
* Get instrumentation manager.
*
* @return instrumentationManager the instrumentation manager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
// Normalize a possibly-null subscription/group name to an empty string.
private String getGroup(String group) {
    return (group == null) ? "" : group;
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} |
same as above noted. setter better not set the default value. | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | this.defaultAzureCredential = defaultAzureCredential; | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | class DefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventHubsNamespaceProcessorFactory.class);
// Listeners notified about processor client lifecycle events.
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final CheckpointStore checkpointStore;
private final PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> propertiesSupplier;
// Cache of processor clients keyed by (event hub, consumer group).
private final Map<ConsumerIdentifier, EventProcessorClient> processorClientMap = new ConcurrentHashMap<>();
private final ProcessorPropertiesParentMerger propertiesMerger = new ProcessorPropertiesParentMerger();
private AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided {@link CheckpointStore}.
* @param checkpointStore the checkpoint store.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore) {
this(checkpointStore, null, null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and namespace level properties.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties) {
this(checkpointStore, namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> supplier) {
this(checkpointStore, null, supplier);
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.checkpointStore = checkpointStore;
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return doCreateProcessor(eventHub, consumerGroup, listener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(eventHub, consumerGroup)));
}
@Override
public void destroy() {
this.processorClientMap.forEach((t, client) -> {
listeners.forEach(l -> l.processorRemoved(t.getDestination(), t.getGroup(), client));
client.stop();
});
this.processorClientMap.clear();
this.listeners.clear();
}
private EventProcessorClient doCreateProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener,
@Nullable ProcessorProperties properties) {
ConsumerIdentifier key = new ConsumerIdentifier(eventHub, consumerGroup);
return processorClientMap.computeIfAbsent(key, k -> {
ProcessorProperties processorProperties = propertiesMerger.mergeParent(properties, this.namespaceProperties);
processorProperties.setEventHubName(k.getDestination());
processorProperties.setConsumerGroup(k.getGroup());
EventProcessorClientBuilderFactory factory =
new EventProcessorClientBuilderFactory(processorProperties, this.checkpointStore, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
EventProcessorClient client = factory.build().buildEventProcessorClient();
LOGGER.info("EventProcessor created for event hub '{}' with consumer group '{}'", k.getDestination(), k.getGroup());
this.listeners.forEach(l -> l.processorAdded(k.getDestination(), k.getGroup(), client));
return client;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} | class DefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventHubsNamespaceProcessorFactory.class);
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final CheckpointStore checkpointStore;
private final PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> propertiesSupplier;
private final Map<ConsumerIdentifier, EventProcessorClient> processorClientMap = new ConcurrentHashMap<>();
private final ProcessorPropertiesParentMerger propertiesMerger = new ProcessorPropertiesParentMerger();
private AzureCredentialResolver<TokenCredential> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided {@link CheckpointStore}.
* @param checkpointStore the checkpoint store.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore) {
this(checkpointStore, null, null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and namespace level properties.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties) {
this(checkpointStore, namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> supplier) {
this(checkpointStore, null, supplier);
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.checkpointStore = checkpointStore;
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return doCreateProcessor(eventHub, consumerGroup, listener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(eventHub, consumerGroup)));
}
@Override
public void destroy() {
this.processorClientMap.forEach((t, client) -> {
listeners.forEach(l -> l.processorRemoved(t.getDestination(), t.getGroup(), client));
client.stop();
});
this.processorClientMap.clear();
this.listeners.clear();
}
private EventProcessorClient doCreateProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener,
@Nullable ProcessorProperties properties) {
ConsumerIdentifier key = new ConsumerIdentifier(eventHub, consumerGroup);
return processorClientMap.computeIfAbsent(key, k -> {
ProcessorProperties processorProperties = propertiesMerger.mergeParent(properties, this.namespaceProperties);
processorProperties.setEventHubName(k.getDestination());
processorProperties.setConsumerGroup(k.getGroup());
EventProcessorClientBuilderFactory factory =
new EventProcessorClientBuilderFactory(processorProperties, this.checkpointStore, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
EventProcessorClient client = factory.build().buildEventProcessorClient();
LOGGER.info("EventProcessor created for event hub '{}' with consumer group '{}'", k.getDestination(), k.getGroup());
this.listeners.forEach(l -> l.processorAdded(k.getDestination(), k.getGroup(), client));
return client;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<TokenCredential> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} |
same as above | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | this.defaultAzureCredential = defaultAzureCredential; | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | class DefaultServiceBusNamespaceProcessorFactory implements ServiceBusProcessorFactory, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultServiceBusNamespaceProcessorFactory.class);
private final Map<ConsumerIdentifier, ServiceBusProcessorClient> processorMap = new ConcurrentHashMap<>();
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> propertiesSupplier;
private final ProcessorPropertiesParentMerger propertiesMerger = new ProcessorPropertiesParentMerger();
private AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided namespace level properties.
* @param namespaceProperties the namespace properties.
*/
public DefaultServiceBusNamespaceProcessorFactory(NamespaceProperties namespaceProperties) {
this(namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided namespace level properties and processor {@link PropertiesSupplier}.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each queue/topic entity.
*/
public DefaultServiceBusNamespaceProcessorFactory(NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> supplier) {
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
private void close(Map<ConsumerIdentifier, ServiceBusProcessorClient> map, Consumer<ServiceBusProcessorClient> close) {
map.forEach((t, p) -> {
try {
listeners.forEach(l -> l.processorRemoved(t.getDestination(), t.getGroup(), p));
close.accept(p);
} catch (Exception ex) {
LOGGER.warn("Failed to clean service bus queue client factory", ex);
}
});
}
@Override
public void destroy() {
close(processorMap, ServiceBusProcessorClient::close);
this.processorMap.clear();
this.listeners.clear();
}
@Override
public ServiceBusProcessorClient createProcessor(String queue, MessageProcessingListener messageProcessingListener) {
return doCreateProcessor(queue, null, messageProcessingListener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(queue)));
}
@Override
public ServiceBusProcessorClient createProcessor(String topic,
String subscription,
MessageProcessingListener messageProcessingListener) {
return doCreateProcessor(topic, subscription, messageProcessingListener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(topic, subscription)));
}
private ServiceBusProcessorClient doCreateProcessor(String name, String subscription,
MessageProcessingListener listener,
@Nullable ProcessorProperties properties) {
ConsumerIdentifier key = new ConsumerIdentifier(name, subscription);
return processorMap.computeIfAbsent(key, k -> {
ProcessorProperties processorProperties = propertiesMerger.mergeParent(properties, this.namespaceProperties);
processorProperties.setAutoComplete(false);
processorProperties.setEntityName(k.getDestination());
if (!k.hasGroup()) {
processorProperties.setEntityType(ServiceBusEntityType.QUEUE);
} else {
processorProperties.setEntityType(ServiceBusEntityType.TOPIC);
processorProperties.setSubscriptionName(k.getGroup());
}
ServiceBusProcessorClient client;
if (Boolean.TRUE.equals(processorProperties.getSessionEnabled())) {
ServiceBusSessionProcessorClientBuilderFactory factory =
new ServiceBusSessionProcessorClientBuilderFactory(processorProperties, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_SERVICE_BUS);
client = factory.build().buildProcessorClient();
} else {
ServiceBusProcessorClientBuilderFactory factory =
new ServiceBusProcessorClientBuilderFactory(processorProperties, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_SERVICE_BUS);
client = factory.build().buildProcessorClient();
}
this.listeners.forEach(l -> l.processorAdded(k.getDestination(), k.getGroup(), client));
return client;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} | class DefaultServiceBusNamespaceProcessorFactory implements ServiceBusProcessorFactory, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultServiceBusNamespaceProcessorFactory.class);
private final Map<ConsumerIdentifier, ServiceBusProcessorClient> processorMap = new ConcurrentHashMap<>();
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> propertiesSupplier;
private final ProcessorPropertiesParentMerger propertiesMerger = new ProcessorPropertiesParentMerger();
private AzureCredentialResolver<TokenCredential> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided namespace level properties.
* @param namespaceProperties the namespace properties.
*/
public DefaultServiceBusNamespaceProcessorFactory(NamespaceProperties namespaceProperties) {
this(namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided namespace level properties and processor {@link PropertiesSupplier}.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each queue/topic entity.
*/
public DefaultServiceBusNamespaceProcessorFactory(NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> supplier) {
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
private void close(Map<ConsumerIdentifier, ServiceBusProcessorClient> map, Consumer<ServiceBusProcessorClient> close) {
map.forEach((t, p) -> {
try {
listeners.forEach(l -> l.processorRemoved(t.getDestination(), t.getGroup(), p));
close.accept(p);
} catch (Exception ex) {
LOGGER.warn("Failed to clean service bus queue client factory", ex);
}
});
}
@Override
public void destroy() {
close(processorMap, ServiceBusProcessorClient::close);
this.processorMap.clear();
this.listeners.clear();
}
@Override
public ServiceBusProcessorClient createProcessor(String queue, MessageProcessingListener messageProcessingListener) {
return doCreateProcessor(queue, null, messageProcessingListener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(queue)));
}
@Override
public ServiceBusProcessorClient createProcessor(String topic,
String subscription,
MessageProcessingListener messageProcessingListener) {
return doCreateProcessor(topic, subscription, messageProcessingListener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(topic, subscription)));
}
private ServiceBusProcessorClient doCreateProcessor(String name, String subscription,
MessageProcessingListener listener,
@Nullable ProcessorProperties properties) {
ConsumerIdentifier key = new ConsumerIdentifier(name, subscription);
return processorMap.computeIfAbsent(key, k -> {
ProcessorProperties processorProperties = propertiesMerger.mergeParent(properties, this.namespaceProperties);
processorProperties.setAutoComplete(false);
processorProperties.setEntityName(k.getDestination());
if (!k.hasGroup()) {
processorProperties.setEntityType(ServiceBusEntityType.QUEUE);
} else {
processorProperties.setEntityType(ServiceBusEntityType.TOPIC);
processorProperties.setSubscriptionName(k.getGroup());
}
ServiceBusProcessorClient client;
if (Boolean.TRUE.equals(processorProperties.getSessionEnabled())) {
ServiceBusSessionProcessorClientBuilderFactory factory =
new ServiceBusSessionProcessorClientBuilderFactory(processorProperties, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_SERVICE_BUS);
client = factory.build().buildProcessorClient();
} else {
ServiceBusProcessorClientBuilderFactory factory =
new ServiceBusProcessorClientBuilderFactory(processorProperties, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_SERVICE_BUS);
client = factory.build().buildProcessorClient();
}
this.listeners.forEach(l -> l.processorAdded(k.getDestination(), k.getGroup(), client));
return client;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<TokenCredential> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} |
same as above. | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | this.defaultAzureCredential = defaultAzureCredential; | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | class DefaultServiceBusNamespaceProducerFactory implements ServiceBusProducerFactory, DisposableBean {
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final PropertiesSupplier<String, ProducerProperties> propertiesSupplier;
private final Map<String, ServiceBusSenderAsyncClient> clients = new ConcurrentHashMap<>();
private final SenderPropertiesParentMerger parentMerger = new SenderPropertiesParentMerger();
private AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided namespace level configuration.
* @param namespaceProperties the namespace properties
*/
public DefaultServiceBusNamespaceProducerFactory(NamespaceProperties namespaceProperties) {
this(namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided namespace level configuration and producer {@link PropertiesSupplier}.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProducerProperties} for each queue/topic entity.
*/
public DefaultServiceBusNamespaceProducerFactory(NamespaceProperties namespaceProperties,
PropertiesSupplier<String, ProducerProperties> supplier) {
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public ServiceBusSenderAsyncClient createProducer(String name) {
return createProducer(name, null);
}
@Override
public ServiceBusSenderAsyncClient createProducer(String name, ServiceBusEntityType entityType) {
ProducerProperties producerProperties = this.propertiesSupplier.getProperties(name) != null
? this.propertiesSupplier.getProperties(name) : new ProducerProperties();
if (entityType != null) {
producerProperties.setEntityType(entityType);
}
return doCreateProducer(name, producerProperties);
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
@Override
public void destroy() {
clients.forEach((name, producer) -> {
listeners.forEach(l -> l.producerRemoved(name, producer));
producer.close();
});
this.clients.clear();
this.listeners.clear();
}
private ServiceBusSenderAsyncClient doCreateProducer(String name, @Nullable ProducerProperties properties) {
return clients.computeIfAbsent(name, entityName -> {
ProducerProperties producerProperties = parentMerger.mergeParent(properties, this.namespaceProperties);
producerProperties.setEntityName(entityName);
ServiceBusSenderClientBuilderFactory factory = new ServiceBusSenderClientBuilderFactory(producerProperties);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_SERVICE_BUS);
ServiceBusSenderAsyncClient producerClient = factory.build().buildAsyncClient();
this.listeners.forEach(l -> l.producerAdded(entityName, producerClient));
return producerClient;
});
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} | class DefaultServiceBusNamespaceProducerFactory implements ServiceBusProducerFactory, DisposableBean {
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final PropertiesSupplier<String, ProducerProperties> propertiesSupplier;
private final Map<String, ServiceBusSenderAsyncClient> clients = new ConcurrentHashMap<>();
private final SenderPropertiesParentMerger parentMerger = new SenderPropertiesParentMerger();
private AzureCredentialResolver<TokenCredential> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided namespace level configuration.
* @param namespaceProperties the namespace properties
*/
public DefaultServiceBusNamespaceProducerFactory(NamespaceProperties namespaceProperties) {
this(namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided namespace level configuration and producer {@link PropertiesSupplier}.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProducerProperties} for each queue/topic entity.
*/
public DefaultServiceBusNamespaceProducerFactory(NamespaceProperties namespaceProperties,
PropertiesSupplier<String, ProducerProperties> supplier) {
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public ServiceBusSenderAsyncClient createProducer(String name) {
return createProducer(name, null);
}
@Override
public ServiceBusSenderAsyncClient createProducer(String name, ServiceBusEntityType entityType) {
ProducerProperties producerProperties = this.propertiesSupplier.getProperties(name) != null
? this.propertiesSupplier.getProperties(name) : new ProducerProperties();
if (entityType != null) {
producerProperties.setEntityType(entityType);
}
return doCreateProducer(name, producerProperties);
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
@Override
public void destroy() {
clients.forEach((name, producer) -> {
listeners.forEach(l -> l.producerRemoved(name, producer));
producer.close();
});
this.clients.clear();
this.listeners.clear();
}
private ServiceBusSenderAsyncClient doCreateProducer(String name, @Nullable ProducerProperties properties) {
return clients.computeIfAbsent(name, entityName -> {
ProducerProperties producerProperties = parentMerger.mergeParent(properties, this.namespaceProperties);
producerProperties.setEntityName(entityName);
ServiceBusSenderClientBuilderFactory factory = new ServiceBusSenderClientBuilderFactory(producerProperties);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_SERVICE_BUS);
ServiceBusSenderAsyncClient producerClient = factory.build().buildAsyncClient();
this.listeners.forEach(l -> l.producerAdded(entityName, producerClient));
return producerClient;
});
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<TokenCredential> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} |
Yes. AbstractAzureCredentialBuilderFactory extends the AbstractAzureServiceClientBuilderFactory. | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | if (bean instanceof AbstractAzureServiceClientBuilderFactory | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} |
Then you have to write it for every builderfactory, but with this bpp, we only need to write it in one place. | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | (TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)); | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureCredentialBuilderFactory) {
return bean;
}
if (bean instanceof AbstractAzureServiceClientBuilderFactory
&& beanFactory.containsBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME)) {
((AbstractAzureServiceClientBuilderFactory) bean).setDefaultTokenCredential(
(TokenCredential) beanFactory.getBean(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME));
}
return bean;
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} | class AzureServiceClientBuilderFactoryPostProcessor implements BeanPostProcessor, BeanFactoryAware {
private BeanFactory beanFactory;
@Override
@SuppressWarnings("rawtypes")
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
} |
No, the defaultTokenCredential could be set from outside. It will have a fall back value if none is set. | public void setDefaultTokenCredential(TokenCredential defaultTokenCredential) {
if (defaultTokenCredential != null) {
this.defaultTokenCredential = defaultTokenCredential;
}
} | this.defaultTokenCredential = defaultTokenCredential; | public void setDefaultTokenCredential(TokenCredential defaultTokenCredential) {
if (defaultTokenCredential != null) {
this.defaultTokenCredential = defaultTokenCredential;
} else {
LOGGER.debug("Will ignore the 'null' default token credential.");
}
} | class {} | class {} |
The binder will be created via the binder auto-configuration, so the list won't be null. | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | this.clientFactoryCustomizers = clientFactoryCustomizers; | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | class EventHubsMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<EventHubsConsumerProperties>, ExtendedProducerProperties<EventHubsProducerProperties>, EventHubsChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, EventHubsConsumerProperties, EventHubsProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageChannelBinder.class);
private static final ExpressionParser EXPRESSION_PARSER = new SpelExpressionParser();
private NamespaceProperties namespaceProperties;
private EventHubsTemplate eventHubsTemplate;
private CheckpointStore checkpointStore;
private EventHubsProcessorContainer processorContainer;
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private EventHubsExtendedBindingProperties bindingProperties = new EventHubsExtendedBindingProperties();
private final Map<String, ExtendedProducerProperties<EventHubsProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<EventHubsConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link EventHubsMessageChannelBinder} with the specified headers to embed and {@link EventHubsChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public EventHubsMessageChannelBinder(String[] headersToEmbed, EventHubsChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
ProducerDestination destination,
ExtendedProducerProperties<EventHubsProducerProperties> producerProperties,
MessageChannel errorChannel) {
extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
Assert.notNull(getEventHubTemplate(), "eventHubsTemplate can't be null when create a producer");
DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), this.eventHubsTemplate);
handler.setBeanFactory(getBeanFactory());
handler.setSync(producerProperties.getExtension().isSync());
handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
handler.setSendFailureChannel(errorChannel);
String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
if (producerProperties.isPartitioned()) {
handler.setPartitionIdExpression(
EXPRESSION_PARSER.parseExpression("headers['" + BinderHeaders.PARTITION_HEADER + "']"));
} else {
handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
}
return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<EventHubsConsumerProperties> properties) {
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
Assert.notNull(getProcessorContainer(), "eventProcessorsContainer can't be null when create a consumer");
boolean anonymous = !StringUtils.hasText(group);
if (anonymous) {
group = "anonymous." + UUID.randomUUID();
}
EventHubsInboundChannelAdapter inboundAdapter;
if (properties.isBatchMode()) {
inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
destination.getName(), group, ListenerMode.BATCH, properties.getExtension().getCheckpoint());
} else {
inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
destination.getName(), group, properties.getExtension().getCheckpoint());
}
inboundAdapter.setBeanFactory(getBeanFactory());
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + group);
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, group, properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
return inboundAdapter;
}
@Override
public EventHubsConsumerProperties getExtendedConsumerProperties(String destination) {
return this.bindingProperties.getExtendedConsumerProperties(destination);
}
@Override
public EventHubsProducerProperties getExtendedProducerProperties(String destination) {
return this.bindingProperties.getExtendedProducerProperties(destination);
}
@Override
public String getDefaultsPrefix() {
return this.bindingProperties.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return this.bindingProperties.getExtendedPropertiesEntryClass();
}
/**
* Set binding properties.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(EventHubsExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
return key -> {
if (this.extendedProducerPropertiesMap.containsKey(key)) {
EventHubsProducerProperties producerProperties = this.extendedProducerPropertiesMap.get(key)
.getExtension();
producerProperties.setEventHubName(key);
return producerProperties;
} else {
LOGGER.debug("Can't find extended properties for {}", key);
return null;
}
};
}
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
return key -> {
if (this.extendedConsumerPropertiesMap.containsKey(key)) {
EventHubsConsumerProperties consumerProperties = this.extendedConsumerPropertiesMap.get(key)
.getExtension();
consumerProperties.setEventHubName(key.getDestination());
consumerProperties.setConsumerGroup(key.getGroup());
return consumerProperties;
} else {
LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
return null;
}
};
}
private EventHubsTemplate getEventHubTemplate() {
if (this.eventHubsTemplate == null) {
DefaultEventHubsNamespaceProducerFactory factory = new DefaultEventHubsNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, producerAsyncClient) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.eventHubsTemplate = new EventHubsTemplate(factory);
}
return this.eventHubsTemplate;
}
private EventHubsProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultEventHubsNamespaceProcessorFactory factory = new DefaultEventHubsNamespaceProcessorFactory(
this.checkpointStore, this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, consumerGroup, processorClient) -> {
String instrumentationName = name + "/" + consumerGroup;
Instrumentation instrumentation = new EventHubsProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new EventHubsProcessorContainer(factory);
}
return this.processorContainer;
}
/**
* Set namespace properties.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set checkpoint store.
*
* @param checkpointStore the checkpoint store
*/
public void setCheckpointStore(CheckpointStore checkpointStore) {
this.checkpointStore = checkpointStore;
}
/**
* Get instrumentation manager.
*
* @return instrumentationManager the instrumentation manager
* @see InstrumentationManager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} | class EventHubsMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<EventHubsConsumerProperties>, ExtendedProducerProperties<EventHubsProducerProperties>, EventHubsChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, EventHubsConsumerProperties, EventHubsProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageChannelBinder.class);
private static final ExpressionParser EXPRESSION_PARSER = new SpelExpressionParser();
private NamespaceProperties namespaceProperties;
private EventHubsTemplate eventHubsTemplate;
private CheckpointStore checkpointStore;
private EventHubsProcessorContainer processorContainer;
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private EventHubsExtendedBindingProperties bindingProperties = new EventHubsExtendedBindingProperties();
private final Map<String, ExtendedProducerProperties<EventHubsProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<EventHubsConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link EventHubsMessageChannelBinder} with the specified headers to embed and {@link EventHubsChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public EventHubsMessageChannelBinder(String[] headersToEmbed, EventHubsChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
ProducerDestination destination,
ExtendedProducerProperties<EventHubsProducerProperties> producerProperties,
MessageChannel errorChannel) {
extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
Assert.notNull(getEventHubTemplate(), "eventHubsTemplate can't be null when create a producer");
DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), this.eventHubsTemplate);
handler.setBeanFactory(getBeanFactory());
handler.setSync(producerProperties.getExtension().isSync());
handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
handler.setSendFailureChannel(errorChannel);
String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
if (producerProperties.isPartitioned()) {
handler.setPartitionIdExpression(
EXPRESSION_PARSER.parseExpression("headers['" + BinderHeaders.PARTITION_HEADER + "']"));
} else {
handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
}
return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<EventHubsConsumerProperties> properties) {
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
Assert.notNull(getProcessorContainer(), "eventProcessorsContainer can't be null when create a consumer");
boolean anonymous = !StringUtils.hasText(group);
if (anonymous) {
group = "anonymous." + UUID.randomUUID();
}
EventHubsInboundChannelAdapter inboundAdapter;
if (properties.isBatchMode()) {
inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
destination.getName(), group, ListenerMode.BATCH, properties.getExtension().getCheckpoint());
} else {
inboundAdapter = new EventHubsInboundChannelAdapter(this.processorContainer,
destination.getName(), group, properties.getExtension().getCheckpoint());
}
inboundAdapter.setBeanFactory(getBeanFactory());
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + group);
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, group, properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
return inboundAdapter;
}
@Override
public EventHubsConsumerProperties getExtendedConsumerProperties(String destination) {
return this.bindingProperties.getExtendedConsumerProperties(destination);
}
@Override
public EventHubsProducerProperties getExtendedProducerProperties(String destination) {
return this.bindingProperties.getExtendedProducerProperties(destination);
}
@Override
public String getDefaultsPrefix() {
return this.bindingProperties.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return this.bindingProperties.getExtendedPropertiesEntryClass();
}
/**
* Set binding properties.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(EventHubsExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
return key -> {
if (this.extendedProducerPropertiesMap.containsKey(key)) {
EventHubsProducerProperties producerProperties = this.extendedProducerPropertiesMap.get(key)
.getExtension();
producerProperties.setEventHubName(key);
return producerProperties;
} else {
LOGGER.debug("Can't find extended properties for {}", key);
return null;
}
};
}
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
return key -> {
if (this.extendedConsumerPropertiesMap.containsKey(key)) {
EventHubsConsumerProperties consumerProperties = this.extendedConsumerPropertiesMap.get(key)
.getExtension();
consumerProperties.setEventHubName(key.getDestination());
consumerProperties.setConsumerGroup(key.getGroup());
return consumerProperties;
} else {
LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
return null;
}
};
}
private EventHubsTemplate getEventHubTemplate() {
if (this.eventHubsTemplate == null) {
DefaultEventHubsNamespaceProducerFactory factory = new DefaultEventHubsNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, producerAsyncClient) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.eventHubsTemplate = new EventHubsTemplate(factory);
}
return this.eventHubsTemplate;
}
private EventHubsProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultEventHubsNamespaceProcessorFactory factory = new DefaultEventHubsNamespaceProcessorFactory(
this.checkpointStore, this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, consumerGroup, processorClient) -> {
String instrumentationName = name + "/" + consumerGroup;
Instrumentation instrumentation = new EventHubsProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new EventHubsProcessorContainer(factory);
}
return this.processorContainer;
}
/**
* Set namespace properties.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set checkpoint store.
*
* @param checkpointStore the checkpoint store
*/
public void setCheckpointStore(CheckpointStore checkpointStore) {
this.checkpointStore = checkpointStore;
}
/**
* Get instrumentation manager.
*
* @return instrumentationManager the instrumentation manager
* @see InstrumentationManager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} |
Same here, won't be null. | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | this.clientFactoryCustomizers = clientFactoryCustomizers; | public void setClientFactoryCustomizers(List<ClientFactoryCustomizer> clientFactoryCustomizers) {
this.clientFactoryCustomizers = clientFactoryCustomizers;
} | class ServiceBusMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<ServiceBusConsumerProperties>,
ExtendedProducerProperties<ServiceBusProducerProperties>,
ServiceBusChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, ServiceBusConsumerProperties, ServiceBusProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class);
private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy();
private static final String EXCEPTION_MESSAGE = "exception-message";
private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties();
private NamespaceProperties namespaceProperties;
private ServiceBusTemplate serviceBusTemplate;
private ServiceBusProcessorContainer processorContainer;
private ServiceBusMessageConverter messageConverter = new ServiceBusMessageConverter();
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private final Map<String, ExtendedProducerProperties<ServiceBusProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<ServiceBusConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link ServiceBusMessageChannelBinder} with the specified headersToEmbed and {@link ServiceBusChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public ServiceBusMessageChannelBinder(String[] headersToEmbed, ServiceBusChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
ProducerDestination destination,
ExtendedProducerProperties<ServiceBusProducerProperties> producerProperties,
MessageChannel errorChannel) {
Assert.notNull(getServiceBusTemplate(), "ServiceBusTemplate can't be null when create a producer");
extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), this.serviceBusTemplate);
handler.setBeanFactory(getBeanFactory());
handler.setSync(producerProperties.getExtension().isSync());
handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
handler.setSendFailureChannel(errorChannel);
String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
if (producerProperties.isPartitioned()) {
handler.setPartitionKeyExpressionString(
"'partitionKey-' + headers['" + BinderHeaders.PARTITION_HEADER + "']");
} else {
handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
}
return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
final ServiceBusInboundChannelAdapter inboundAdapter;
inboundAdapter = new ServiceBusInboundChannelAdapter(getProcessorContainer(), destination.getName(), group,
buildCheckpointConfig(properties));
inboundAdapter.setBeanFactory(getBeanFactory());
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + getGroup(group));
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, getGroup(group), properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
inboundAdapter.setMessageConverter(messageConverter);
return inboundAdapter;
}
@Override
protected MessageHandler getErrorMessageHandler(ConsumerDestination destination,
String group,
final ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
return message -> {
Assert.state(message instanceof ErrorMessage, "Expected an ErrorMessage, not a "
+ message.getClass().toString() + " for: " + message);
ErrorMessage errorMessage = (ErrorMessage) message;
Message<?> amqpMessage = errorMessage.getOriginalMessage();
if (amqpMessage == null) {
logger.error("No raw message header in " + message);
} else {
Throwable cause = (Throwable) message.getPayload();
if (properties.getExtension().isRequeueRejected()) {
deadLetter(destination.getName(), amqpMessage, EXCEPTION_MESSAGE,
cause.getCause() != null ? cause.getCause().getMessage() : cause.getMessage());
} else {
abandon(destination.getName(), amqpMessage);
}
}
};
}
/**
* Moves a message to the dead-letter sub-queue with dead-letter reason.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
* @param deadLetterReason the dead-letter reason
* @param deadLetterErrorDescription the dead-letter error description
*/
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
/**
* Abandons the message in this context.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
*/
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
public ServiceBusConsumerProperties getExtendedConsumerProperties(String channelName) {
return this.bindingProperties.getExtendedConsumerProperties(channelName);
}
@Override
public ServiceBusProducerProperties getExtendedProducerProperties(String channelName) {
return this.bindingProperties.getExtendedProducerProperties(channelName);
}
@Override
public String getDefaultsPrefix() {
return this.bindingProperties.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return this.bindingProperties.getExtendedPropertiesEntryClass();
}
@Override
protected ErrorMessageStrategy getErrorMessageStrategy() {
return DEFAULT_ERROR_MESSAGE_STRATEGY;
}
/**
* Set binding properties.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(ServiceBusExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
private CheckpointConfig buildCheckpointConfig(
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
return new CheckpointConfig(properties.getExtension().getCheckpointMode());
}
private ServiceBusTemplate getServiceBusTemplate() {
if (this.serviceBusTemplate == null) {
DefaultServiceBusNamespaceProducerFactory factory = new DefaultServiceBusNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, client) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.serviceBusTemplate = new ServiceBusTemplate(factory);
}
return this.serviceBusTemplate;
}
private ServiceBusProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultServiceBusNamespaceProcessorFactory factory = new DefaultServiceBusNamespaceProcessorFactory(
this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, subscription, client) -> {
String instrumentationName = name + "/" + getGroup(subscription);
Instrumentation instrumentation = new ServiceBusProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new ServiceBusProcessorContainer(factory);
}
return this.processorContainer;
}
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
return key -> {
if (this.extendedProducerPropertiesMap.containsKey(key)) {
ServiceBusProducerProperties producerProperties = this.extendedProducerPropertiesMap.get(key)
.getExtension();
producerProperties.setEntityName(key);
return producerProperties;
} else {
LOGGER.debug("Can't find extended properties for {}", key);
return null;
}
};
}
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
return key -> {
if (this.extendedConsumerPropertiesMap.containsKey(key)) {
ServiceBusConsumerProperties consumerProperties = this.extendedConsumerPropertiesMap.get(key)
.getExtension();
consumerProperties.setEntityName(key.getDestination());
consumerProperties.setSubscriptionName(key.getGroup());
return consumerProperties;
} else {
LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
return null;
}
};
}
/**
* Set namespace properties.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set message converter.
*
* @param messageConverter the message converter
*/
public void setMessageConverter(ServiceBusMessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
/**
* Get instrumentation manager.
*
* @return instrumentationManager the instrumentation manager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
private String getGroup(String group) {
return group != null ? group : "";
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} | class ServiceBusMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<ServiceBusConsumerProperties>,
ExtendedProducerProperties<ServiceBusProducerProperties>,
ServiceBusChannelProvisioner>
implements
ExtendedPropertiesBinder<MessageChannel, ServiceBusConsumerProperties, ServiceBusProducerProperties> {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class);
private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy();
private static final String EXCEPTION_MESSAGE = "exception-message";
private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties();
private NamespaceProperties namespaceProperties;
private ServiceBusTemplate serviceBusTemplate;
private ServiceBusProcessorContainer processorContainer;
private ServiceBusMessageConverter messageConverter = new ServiceBusMessageConverter();
private final InstrumentationManager instrumentationManager = new DefaultInstrumentationManager();
private final Map<String, ExtendedProducerProperties<ServiceBusProducerProperties>>
extendedProducerPropertiesMap = new ConcurrentHashMap<>();
private final Map<ConsumerIdentifier, ExtendedConsumerProperties<ServiceBusConsumerProperties>>
extendedConsumerPropertiesMap = new ConcurrentHashMap<>();
private List<ClientFactoryCustomizer> clientFactoryCustomizers = new ArrayList<>();
/**
* Construct a {@link ServiceBusMessageChannelBinder} with the specified headersToEmbed and {@link ServiceBusChannelProvisioner}.
*
* @param headersToEmbed the headers to embed
* @param provisioningProvider the provisioning provider
*/
public ServiceBusMessageChannelBinder(String[] headersToEmbed, ServiceBusChannelProvisioner provisioningProvider) {
super(headersToEmbed, provisioningProvider);
}
@Override
protected MessageHandler createProducerMessageHandler(
ProducerDestination destination,
ExtendedProducerProperties<ServiceBusProducerProperties> producerProperties,
MessageChannel errorChannel) {
Assert.notNull(getServiceBusTemplate(), "ServiceBusTemplate can't be null when create a producer");
extendedProducerPropertiesMap.put(destination.getName(), producerProperties);
DefaultMessageHandler handler = new DefaultMessageHandler(destination.getName(), this.serviceBusTemplate);
handler.setBeanFactory(getBeanFactory());
handler.setSync(producerProperties.getExtension().isSync());
handler.setSendTimeout(producerProperties.getExtension().getSendTimeout().toMillis());
handler.setSendFailureChannel(errorChannel);
String instrumentationId = Instrumentation.buildId(PRODUCER, destination.getName());
handler.setSendCallback(new InstrumentationSendCallback(instrumentationId, instrumentationManager));
if (producerProperties.isPartitioned()) {
handler.setPartitionKeyExpressionString(
"'partitionKey-' + headers['" + BinderHeaders.PARTITION_HEADER + "']");
} else {
handler.setPartitionKeyExpression(new FunctionExpression<Message<?>>(m -> m.getPayload().hashCode()));
}
return handler;
}
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
extendedConsumerPropertiesMap.put(new ConsumerIdentifier(destination.getName(), group), properties);
final ServiceBusInboundChannelAdapter inboundAdapter;
inboundAdapter = new ServiceBusInboundChannelAdapter(getProcessorContainer(), destination.getName(), group,
buildCheckpointConfig(properties));
inboundAdapter.setBeanFactory(getBeanFactory());
String instrumentationId = Instrumentation.buildId(CONSUMER, destination.getName() + "/" + getGroup(group));
inboundAdapter.setInstrumentationManager(instrumentationManager);
inboundAdapter.setInstrumentationId(instrumentationId);
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, getGroup(group), properties);
inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
inboundAdapter.setMessageConverter(messageConverter);
return inboundAdapter;
}
@Override
protected MessageHandler getErrorMessageHandler(ConsumerDestination destination,
String group,
final ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
return message -> {
Assert.state(message instanceof ErrorMessage, "Expected an ErrorMessage, not a "
+ message.getClass().toString() + " for: " + message);
ErrorMessage errorMessage = (ErrorMessage) message;
Message<?> amqpMessage = errorMessage.getOriginalMessage();
if (amqpMessage == null) {
logger.error("No raw message header in " + message);
} else {
Throwable cause = (Throwable) message.getPayload();
if (properties.getExtension().isRequeueRejected()) {
deadLetter(destination.getName(), amqpMessage, EXCEPTION_MESSAGE,
cause.getCause() != null ? cause.getCause().getMessage() : cause.getMessage());
} else {
abandon(destination.getName(), amqpMessage);
}
}
};
}
/**
* Moves a message to the dead-letter sub-queue with dead-letter reason.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
* @param deadLetterReason the dead-letter reason
* @param deadLetterErrorDescription the dead-letter error description
*/
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
/**
* Abandons the message in this context.
*
* @param <T> the type of message payload
* @param destination the destination
* @param message the message
*/
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
public ServiceBusConsumerProperties getExtendedConsumerProperties(String channelName) {
return this.bindingProperties.getExtendedConsumerProperties(channelName);
}
@Override
public ServiceBusProducerProperties getExtendedProducerProperties(String channelName) {
return this.bindingProperties.getExtendedProducerProperties(channelName);
}
@Override
public String getDefaultsPrefix() {
return this.bindingProperties.getDefaultsPrefix();
}
@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return this.bindingProperties.getExtendedPropertiesEntryClass();
}
@Override
protected ErrorMessageStrategy getErrorMessageStrategy() {
return DEFAULT_ERROR_MESSAGE_STRATEGY;
}
/**
* Set binding properties.
*
* @param bindingProperties the binding properties
*/
public void setBindingProperties(ServiceBusExtendedBindingProperties bindingProperties) {
this.bindingProperties = bindingProperties;
}
private CheckpointConfig buildCheckpointConfig(
ExtendedConsumerProperties<ServiceBusConsumerProperties> properties) {
return new CheckpointConfig(properties.getExtension().getCheckpointMode());
}
private ServiceBusTemplate getServiceBusTemplate() {
if (this.serviceBusTemplate == null) {
DefaultServiceBusNamespaceProducerFactory factory = new DefaultServiceBusNamespaceProducerFactory(
this.namespaceProperties, getProducerPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, client) -> {
DefaultInstrumentation instrumentation = new DefaultInstrumentation(name, PRODUCER);
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.serviceBusTemplate = new ServiceBusTemplate(factory);
}
return this.serviceBusTemplate;
}
private ServiceBusProcessorContainer getProcessorContainer() {
if (this.processorContainer == null) {
DefaultServiceBusNamespaceProcessorFactory factory = new DefaultServiceBusNamespaceProcessorFactory(
this.namespaceProperties, getProcessorPropertiesSupplier());
clientFactoryCustomizers.forEach(customizer -> customizer.customize(factory));
factory.addListener((name, subscription, client) -> {
String instrumentationName = name + "/" + getGroup(subscription);
Instrumentation instrumentation = new ServiceBusProcessorInstrumentation(instrumentationName, CONSUMER, Duration.ofMinutes(2));
instrumentation.markUp();
instrumentationManager.addHealthInstrumentation(instrumentation);
});
this.processorContainer = new ServiceBusProcessorContainer(factory);
}
return this.processorContainer;
}
private PropertiesSupplier<String, ProducerProperties> getProducerPropertiesSupplier() {
return key -> {
if (this.extendedProducerPropertiesMap.containsKey(key)) {
ServiceBusProducerProperties producerProperties = this.extendedProducerPropertiesMap.get(key)
.getExtension();
producerProperties.setEntityName(key);
return producerProperties;
} else {
LOGGER.debug("Can't find extended properties for {}", key);
return null;
}
};
}
private PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> getProcessorPropertiesSupplier() {
return key -> {
if (this.extendedConsumerPropertiesMap.containsKey(key)) {
ServiceBusConsumerProperties consumerProperties = this.extendedConsumerPropertiesMap.get(key)
.getExtension();
consumerProperties.setEntityName(key.getDestination());
consumerProperties.setSubscriptionName(key.getGroup());
return consumerProperties;
} else {
LOGGER.debug("Can't find extended properties for destination {}, group {}", key.getDestination(), key.getGroup());
return null;
}
};
}
/**
* Set namespace properties.
*
* @param namespaceProperties the namespace properties
*/
public void setNamespaceProperties(NamespaceProperties namespaceProperties) {
this.namespaceProperties = namespaceProperties;
}
/**
* Set message converter.
*
* @param messageConverter the message converter
*/
public void setMessageConverter(ServiceBusMessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
/**
* Get instrumentation manager.
*
* @return instrumentationManager the instrumentation manager
*/
public InstrumentationManager getInstrumentationManager() {
return instrumentationManager;
}
private String getGroup(String group) {
return group != null ? group : "";
}
/**
* Set the client factory customizers.
* @param clientFactoryCustomizers The client factory customizers.
*/
} |
It is by design. | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | this.defaultAzureCredential = defaultAzureCredential; | public void setDefaultAzureCredential(DefaultAzureCredential defaultAzureCredential) {
this.defaultAzureCredential = defaultAzureCredential;
} | class DefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventHubsNamespaceProcessorFactory.class);
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final CheckpointStore checkpointStore;
private final PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> propertiesSupplier;
private final Map<ConsumerIdentifier, EventProcessorClient> processorClientMap = new ConcurrentHashMap<>();
private final ProcessorPropertiesParentMerger propertiesMerger = new ProcessorPropertiesParentMerger();
private AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided {@link CheckpointStore}.
* @param checkpointStore the checkpoint store.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore) {
this(checkpointStore, null, null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and namespace level properties.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties) {
this(checkpointStore, namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> supplier) {
this(checkpointStore, null, supplier);
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.checkpointStore = checkpointStore;
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return doCreateProcessor(eventHub, consumerGroup, listener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(eventHub, consumerGroup)));
}
@Override
public void destroy() {
this.processorClientMap.forEach((t, client) -> {
listeners.forEach(l -> l.processorRemoved(t.getDestination(), t.getGroup(), client));
client.stop();
});
this.processorClientMap.clear();
this.listeners.clear();
}
private EventProcessorClient doCreateProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener,
@Nullable ProcessorProperties properties) {
ConsumerIdentifier key = new ConsumerIdentifier(eventHub, consumerGroup);
return processorClientMap.computeIfAbsent(key, k -> {
ProcessorProperties processorProperties = propertiesMerger.mergeParent(properties, this.namespaceProperties);
processorProperties.setEventHubName(k.getDestination());
processorProperties.setConsumerGroup(k.getGroup());
EventProcessorClientBuilderFactory factory =
new EventProcessorClientBuilderFactory(processorProperties, this.checkpointStore, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
EventProcessorClient client = factory.build().buildEventProcessorClient();
LOGGER.info("EventProcessor created for event hub '{}' with consumer group '{}'", k.getDestination(), k.getGroup());
this.listeners.forEach(l -> l.processorAdded(k.getDestination(), k.getGroup(), client));
return client;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<AzureTokenCredentialProvider> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} | class DefaultEventHubsNamespaceProcessorFactory implements EventHubsProcessorFactory, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultEventHubsNamespaceProcessorFactory.class);
private final List<Listener> listeners = new ArrayList<>();
private final NamespaceProperties namespaceProperties;
private final CheckpointStore checkpointStore;
private final PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> propertiesSupplier;
private final Map<ConsumerIdentifier, EventProcessorClient> processorClientMap = new ConcurrentHashMap<>();
private final ProcessorPropertiesParentMerger propertiesMerger = new ProcessorPropertiesParentMerger();
private AzureCredentialResolver<TokenCredential> tokenCredentialResolver = null;
private DefaultAzureCredential defaultAzureCredential = null;
/**
* Construct a factory with the provided {@link CheckpointStore}.
* @param checkpointStore the checkpoint store.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore) {
this(checkpointStore, null, null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and namespace level properties.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties) {
this(checkpointStore, namespaceProperties, key -> null);
}
/**
* Construct a factory with the provided {@link CheckpointStore} and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
PropertiesSupplier<ConsumerIdentifier, ProcessorProperties> supplier) {
this(checkpointStore, null, supplier);
}
/**
* Construct a factory with the provided {@link CheckpointStore}, namespace level properties and processor {@link PropertiesSupplier}.
* @param checkpointStore the checkpoint store.
* @param namespaceProperties the namespace properties.
* @param supplier the {@link PropertiesSupplier} to supply {@link ProcessorProperties} for each event hub.
*/
public DefaultEventHubsNamespaceProcessorFactory(CheckpointStore checkpointStore,
NamespaceProperties namespaceProperties,
PropertiesSupplier<ConsumerIdentifier,
ProcessorProperties> supplier) {
Assert.notNull(checkpointStore, "CheckpointStore must be provided.");
this.checkpointStore = checkpointStore;
this.namespaceProperties = namespaceProperties;
this.propertiesSupplier = supplier == null ? key -> null : supplier;
}
@Override
public EventProcessorClient createProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener) {
return doCreateProcessor(eventHub, consumerGroup, listener,
this.propertiesSupplier.getProperties(new ConsumerIdentifier(eventHub, consumerGroup)));
}
@Override
public void destroy() {
this.processorClientMap.forEach((t, client) -> {
listeners.forEach(l -> l.processorRemoved(t.getDestination(), t.getGroup(), client));
client.stop();
});
this.processorClientMap.clear();
this.listeners.clear();
}
private EventProcessorClient doCreateProcessor(@NonNull String eventHub, @NonNull String consumerGroup,
@NonNull EventProcessingListener listener,
@Nullable ProcessorProperties properties) {
ConsumerIdentifier key = new ConsumerIdentifier(eventHub, consumerGroup);
return processorClientMap.computeIfAbsent(key, k -> {
ProcessorProperties processorProperties = propertiesMerger.mergeParent(properties, this.namespaceProperties);
processorProperties.setEventHubName(k.getDestination());
processorProperties.setConsumerGroup(k.getGroup());
EventProcessorClientBuilderFactory factory =
new EventProcessorClientBuilderFactory(processorProperties, this.checkpointStore, listener);
factory.setDefaultTokenCredential(this.defaultAzureCredential);
factory.setTokenCredentialResolver(this.tokenCredentialResolver);
factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_INTEGRATION_EVENT_HUBS);
EventProcessorClient client = factory.build().buildEventProcessorClient();
LOGGER.info("EventProcessor created for event hub '{}' with consumer group '{}'", k.getDestination(), k.getGroup());
this.listeners.forEach(l -> l.processorAdded(k.getDestination(), k.getGroup(), client));
return client;
});
}
@Override
public void addListener(Listener listener) {
this.listeners.add(listener);
}
@Override
public boolean removeListener(Listener listener) {
return this.listeners.remove(listener);
}
/**
* Set the token credential resolver.
* @param tokenCredentialResolver The token credential resolver.
*/
public void setTokenCredentialResolver(AzureCredentialResolver<TokenCredential> tokenCredentialResolver) {
this.tokenCredentialResolver = tokenCredentialResolver;
}
/**
* Set the default Azure credential.
* @param defaultAzureCredential The default Azure Credential.
*/
} |
Does this properly handle multi-catch blocks, ex `catch (IOException | UncheckIOException ex)`? | public void visitToken(DetailAST catchBlockToken) {
final DetailAST catchStatement = catchBlockToken.findFirstToken(TokenTypes.PARAMETER_DEF);
final String caughtExceptionVariableName = catchStatement.findFirstToken(TokenTypes.IDENT).getText();
final List<DetailAST> throwStatements = getThrowStatements(catchBlockToken);
final List<String> wrappedExceptions =
getWrappedExceptions(catchBlockToken, catchBlockToken, caughtExceptionVariableName);
throwStatements.forEach(throwToken -> {
final List<String> throwParamNames = new LinkedList<>();
getThrowParamNames(throwToken, throwParamNames);
wrappedExceptions.add(caughtExceptionVariableName);
List<String> intersect =
wrappedExceptions.stream().filter(throwParamNames::contains).collect(Collectors.toList());
if (intersect.size() == 0) {
log(throwToken, String.format(UNUSED_CAUGHT_EXCEPTION_ERROR, caughtExceptionVariableName));
}
});
} | final List<String> wrappedExceptions = | public void visitToken(DetailAST catchBlockToken) {
final DetailAST catchStatement = catchBlockToken.findFirstToken(TokenTypes.PARAMETER_DEF);
final String caughtExceptionVariableName = catchStatement.findFirstToken(TokenTypes.IDENT).getText();
final List<DetailAST> throwStatements = getThrowStatements(catchBlockToken);
final List<String> wrappedExceptions =
getWrappedExceptions(catchBlockToken, catchBlockToken, caughtExceptionVariableName);
throwStatements.forEach(throwToken -> {
final List<String> throwParamNames = new LinkedList<>();
getThrowParamNames(throwToken, throwParamNames);
wrappedExceptions.add(caughtExceptionVariableName);
List<String> intersect =
wrappedExceptions.stream().filter(throwParamNames::contains).collect(Collectors.toList());
if (intersect.size() == 0) {
log(throwToken, String.format(UNUSED_CAUGHT_EXCEPTION_ERROR, caughtExceptionVariableName));
}
});
} | class UseCaughtExceptionCauseCheck extends AbstractCheck {
static final String UNUSED_CAUGHT_EXCEPTION_ERROR = "Should use the current exception cause \"%s\".";
@Override
public int[] getDefaultTokens() {
return getRequiredTokens();
}
@Override
public int[] getAcceptableTokens() {
return getRequiredTokens();
}
@Override
public int[] getRequiredTokens() {
return new int[] {TokenTypes.LITERAL_CATCH};
}
@Override
/**
* Returns the list of exceptions that wrapped the current exception tokens
*
* @param detailAST catch block throw parent token
* @param caughtExceptionVariableName list containing the exception tokens
* @return list of wrapped exception tokens
*/
private List<String> getWrappedExceptions(DetailAST currentCatchAST, DetailAST detailAST,
String caughtExceptionVariableName) {
final List<String> wrappedExceptionNames = new LinkedList<>();
for (DetailAST currentNode : getChildrenNodes(detailAST)) {
if (currentNode.getType() == TokenTypes.IDENT
&& currentNode.getText().equals(caughtExceptionVariableName)) {
getWrappedExceptionVariable(currentCatchAST, wrappedExceptionNames, currentNode);
}
if (currentNode.getFirstChild() != null) {
wrappedExceptionNames.addAll(
getWrappedExceptions(currentCatchAST, currentNode, caughtExceptionVariableName));
}
}
return wrappedExceptionNames;
}
/**
* Returns the wrapped exception variable name
*/
private void getWrappedExceptionVariable(DetailAST currentCatchBlock, List<String> wrappedExceptionNames,
DetailAST currentToken) {
DetailAST temp = currentToken;
while (!temp.equals(currentCatchBlock) && temp.getType() != TokenTypes.ASSIGN) {
temp = temp.getParent();
}
if (temp.getType() == TokenTypes.ASSIGN) {
final DetailAST wrappedException;
if (temp.getParent().getType() == TokenTypes.VARIABLE_DEF) {
wrappedException = temp.getParent().findFirstToken(TokenTypes.IDENT);
} else {
wrappedException = temp.findFirstToken(TokenTypes.IDENT);
}
if (wrappedException != null) {
wrappedExceptionNames.add(wrappedException.getText());
}
}
}
/**
* Returns the parameter names for current throw keyword.
*
* @param throwParent The parent throw token
* @param paramNames The list containing the parameter names
* @return list of throw param names
*/
private List<String> getThrowParamNames(DetailAST throwParent, List<String> paramNames) {
getChildrenNodes(throwParent).forEach(currentNode -> {
if (currentNode.getType() == TokenTypes.IDENT) {
paramNames.add(currentNode.getText());
}
if (currentNode.getFirstChild() != null) {
getThrowParamNames(currentNode, paramNames);
}
});
return paramNames;
}
/**
* Recursive method that searches for all the LITERAL_THROW on the current catch token.
*
* @param catchBlockToken A start token.
* @return list of throw tokens
*/
private List<DetailAST> getThrowStatements(DetailAST catchBlockToken) {
final List<DetailAST> throwStatements = new LinkedList<>();
getChildrenNodes(catchBlockToken).forEach(currentNode -> {
if (TokenTypes.LITERAL_THROW == currentNode.getType()) {
throwStatements.add(currentNode);
}
if (currentNode.getFirstChild() != null) {
throwStatements.addAll(getThrowStatements(currentNode));
}
});
return throwStatements;
}
/**
* Gets all the children of the current parent node.
*
* @param token parent node.
* @return List of children of the current node.
*/
private static List<DetailAST> getChildrenNodes(DetailAST token) {
final List<DetailAST> result = new LinkedList<>();
DetailAST currNode = token.getFirstChild();
while (currNode != null) {
result.add(currNode);
currNode = currNode.getNextSibling();
}
return result;
}
} | class UseCaughtExceptionCauseCheck extends AbstractCheck {
static final String UNUSED_CAUGHT_EXCEPTION_ERROR = "Caught and rethrown exceptions should include the caught"
+ " exception as the cause in the rethrown exception. Dropping the causal exception makes it more difficult"
+ " to troubleshoot issues when they arise. Include the caught exception variable %s as the cause.";
@Override
public int[] getDefaultTokens() {
return getRequiredTokens();
}
@Override
public int[] getAcceptableTokens() {
return getRequiredTokens();
}
@Override
public int[] getRequiredTokens() {
return new int[] {TokenTypes.LITERAL_CATCH};
}
@Override
/**
* Returns the list of exceptions that wrapped the current exception tokens
*
* @param currentCatchAST current catch block token
* @param detailAST catch block throw parent token
* @param caughtExceptionVariableName list containing the exception tokens
* @return list of wrapped exception tokens
*/
private List<String> getWrappedExceptions(DetailAST currentCatchAST, DetailAST detailAST,
String caughtExceptionVariableName) {
final List<String> wrappedExceptionNames = new LinkedList<>();
for (DetailAST currentNode : getChildrenNodes(detailAST)) {
if (currentNode.getType() == TokenTypes.IDENT
&& currentNode.getText().equals(caughtExceptionVariableName)) {
getWrappedExceptionVariable(currentCatchAST, wrappedExceptionNames, currentNode);
}
if (currentNode.getFirstChild() != null) {
wrappedExceptionNames.addAll(
getWrappedExceptions(currentCatchAST, currentNode, caughtExceptionVariableName));
}
}
return wrappedExceptionNames;
}
/**
* Returns the wrapped exception variable name
*/
private void getWrappedExceptionVariable(DetailAST currentCatchBlock, List<String> wrappedExceptionNames,
DetailAST currentToken) {
DetailAST temp = currentToken;
while (!temp.equals(currentCatchBlock) && temp.getType() != TokenTypes.ASSIGN) {
temp = temp.getParent();
}
if (temp.getType() == TokenTypes.ASSIGN) {
final DetailAST wrappedException;
if (temp.getParent().getType() == TokenTypes.VARIABLE_DEF) {
wrappedException = temp.getParent().findFirstToken(TokenTypes.IDENT);
} else if (temp.findFirstToken(TokenTypes.DOT) != null) {
wrappedException = temp.findFirstToken(TokenTypes.DOT).findFirstToken(TokenTypes.IDENT);
} else {
wrappedException = temp.findFirstToken(TokenTypes.IDENT);
}
if (wrappedException != null) {
wrappedExceptionNames.add(wrappedException.getText());
}
}
}
/**
* Returns the parameter names for current throw keyword.
*
* @param throwParent The parent throw token
* @param paramNames The list containing the parameter names
* @return list of throw param names
*/
private List<String> getThrowParamNames(DetailAST throwParent, List<String> paramNames) {
getChildrenNodes(throwParent).forEach(currentNode -> {
if (currentNode.getType() == TokenTypes.IDENT) {
paramNames.add(currentNode.getText());
}
if (currentNode.getFirstChild() != null) {
getThrowParamNames(currentNode, paramNames);
}
});
return paramNames;
}
/**
* Recursive method that searches for all the LITERAL_THROW on the current catch token.
*
* @param catchBlockToken A start token.
* @return list of throw tokens
*/
private List<DetailAST> getThrowStatements(DetailAST catchBlockToken) {
final List<DetailAST> throwStatements = new LinkedList<>();
getChildrenNodes(catchBlockToken).forEach(currentNode -> {
if (TokenTypes.LITERAL_THROW == currentNode.getType()) {
throwStatements.add(currentNode);
}
if (currentNode.getFirstChild() != null) {
throwStatements.addAll(getThrowStatements(currentNode));
}
});
return throwStatements;
}
/**
* Gets all the children by traversing the tree generated from the current parent node.
*
* @param token parent node.
* @return List of children of the current node.
*/
private static List<DetailAST> getChildrenNodes(DetailAST token) {
final List<DetailAST> result = new LinkedList<>();
DetailAST currNode = token.getFirstChild();
while (currNode != null) {
result.add(currNode);
currNode = currNode.getNextSibling();
}
return result;
}
} |
Added a testcase [here ](https://github.com/Azure/azure-sdk-for-java/pull/26818/files#diff-e0aec353d9eb79a0de87f968d476cb28aac9c1d0fc254d854a4cf26514a7db09R105) | public void visitToken(DetailAST catchBlockToken) {
final DetailAST catchStatement = catchBlockToken.findFirstToken(TokenTypes.PARAMETER_DEF);
final String caughtExceptionVariableName = catchStatement.findFirstToken(TokenTypes.IDENT).getText();
final List<DetailAST> throwStatements = getThrowStatements(catchBlockToken);
final List<String> wrappedExceptions =
getWrappedExceptions(catchBlockToken, catchBlockToken, caughtExceptionVariableName);
throwStatements.forEach(throwToken -> {
final List<String> throwParamNames = new LinkedList<>();
getThrowParamNames(throwToken, throwParamNames);
wrappedExceptions.add(caughtExceptionVariableName);
List<String> intersect =
wrappedExceptions.stream().filter(throwParamNames::contains).collect(Collectors.toList());
if (intersect.size() == 0) {
log(throwToken, String.format(UNUSED_CAUGHT_EXCEPTION_ERROR, caughtExceptionVariableName));
}
});
} | final List<String> wrappedExceptions = | public void visitToken(DetailAST catchBlockToken) {
final DetailAST catchStatement = catchBlockToken.findFirstToken(TokenTypes.PARAMETER_DEF);
final String caughtExceptionVariableName = catchStatement.findFirstToken(TokenTypes.IDENT).getText();
final List<DetailAST> throwStatements = getThrowStatements(catchBlockToken);
final List<String> wrappedExceptions =
getWrappedExceptions(catchBlockToken, catchBlockToken, caughtExceptionVariableName);
throwStatements.forEach(throwToken -> {
final List<String> throwParamNames = new LinkedList<>();
getThrowParamNames(throwToken, throwParamNames);
wrappedExceptions.add(caughtExceptionVariableName);
List<String> intersect =
wrappedExceptions.stream().filter(throwParamNames::contains).collect(Collectors.toList());
if (intersect.size() == 0) {
log(throwToken, String.format(UNUSED_CAUGHT_EXCEPTION_ERROR, caughtExceptionVariableName));
}
});
} | class UseCaughtExceptionCauseCheck extends AbstractCheck {
static final String UNUSED_CAUGHT_EXCEPTION_ERROR = "Should use the current exception cause \"%s\".";
@Override
public int[] getDefaultTokens() {
return getRequiredTokens();
}
@Override
public int[] getAcceptableTokens() {
return getRequiredTokens();
}
@Override
public int[] getRequiredTokens() {
return new int[] {TokenTypes.LITERAL_CATCH};
}
@Override
/**
* Returns the list of exceptions that wrapped the current exception tokens
*
* @param detailAST catch block throw parent token
* @param caughtExceptionVariableName list containing the exception tokens
* @return list of wrapped exception tokens
*/
private List<String> getWrappedExceptions(DetailAST currentCatchAST, DetailAST detailAST,
String caughtExceptionVariableName) {
final List<String> wrappedExceptionNames = new LinkedList<>();
for (DetailAST currentNode : getChildrenNodes(detailAST)) {
if (currentNode.getType() == TokenTypes.IDENT
&& currentNode.getText().equals(caughtExceptionVariableName)) {
getWrappedExceptionVariable(currentCatchAST, wrappedExceptionNames, currentNode);
}
if (currentNode.getFirstChild() != null) {
wrappedExceptionNames.addAll(
getWrappedExceptions(currentCatchAST, currentNode, caughtExceptionVariableName));
}
}
return wrappedExceptionNames;
}
/**
* Returns the wrapped exception variable name
*/
private void getWrappedExceptionVariable(DetailAST currentCatchBlock, List<String> wrappedExceptionNames,
DetailAST currentToken) {
DetailAST temp = currentToken;
while (!temp.equals(currentCatchBlock) && temp.getType() != TokenTypes.ASSIGN) {
temp = temp.getParent();
}
if (temp.getType() == TokenTypes.ASSIGN) {
final DetailAST wrappedException;
if (temp.getParent().getType() == TokenTypes.VARIABLE_DEF) {
wrappedException = temp.getParent().findFirstToken(TokenTypes.IDENT);
} else {
wrappedException = temp.findFirstToken(TokenTypes.IDENT);
}
if (wrappedException != null) {
wrappedExceptionNames.add(wrappedException.getText());
}
}
}
/**
* Returns the parameter names for current throw keyword.
*
* @param throwParent The parent throw token
* @param paramNames The list containing the parameter names
* @return list of throw param names
*/
private List<String> getThrowParamNames(DetailAST throwParent, List<String> paramNames) {
getChildrenNodes(throwParent).forEach(currentNode -> {
if (currentNode.getType() == TokenTypes.IDENT) {
paramNames.add(currentNode.getText());
}
if (currentNode.getFirstChild() != null) {
getThrowParamNames(currentNode, paramNames);
}
});
return paramNames;
}
/**
* Recursive method that searches for all the LITERAL_THROW on the current catch token.
*
* @param catchBlockToken A start token.
* @return list of throw tokens
*/
private List<DetailAST> getThrowStatements(DetailAST catchBlockToken) {
final List<DetailAST> throwStatements = new LinkedList<>();
getChildrenNodes(catchBlockToken).forEach(currentNode -> {
if (TokenTypes.LITERAL_THROW == currentNode.getType()) {
throwStatements.add(currentNode);
}
if (currentNode.getFirstChild() != null) {
throwStatements.addAll(getThrowStatements(currentNode));
}
});
return throwStatements;
}
/**
* Gets all the children of the current parent node.
*
* @param token parent node.
* @return List of children of the current node.
*/
private static List<DetailAST> getChildrenNodes(DetailAST token) {
final List<DetailAST> result = new LinkedList<>();
DetailAST currNode = token.getFirstChild();
while (currNode != null) {
result.add(currNode);
currNode = currNode.getNextSibling();
}
return result;
}
} | class UseCaughtExceptionCauseCheck extends AbstractCheck {
static final String UNUSED_CAUGHT_EXCEPTION_ERROR = "Caught and rethrown exceptions should include the caught"
+ " exception as the cause in the rethrown exception. Dropping the causal exception makes it more difficult"
+ " to troubleshoot issues when they arise. Include the caught exception variable %s as the cause.";
@Override
public int[] getDefaultTokens() {
return getRequiredTokens();
}
@Override
public int[] getAcceptableTokens() {
return getRequiredTokens();
}
@Override
public int[] getRequiredTokens() {
return new int[] {TokenTypes.LITERAL_CATCH};
}
@Override
/**
* Returns the list of exceptions that wrapped the current exception tokens
*
* @param currentCatchAST current catch block token
* @param detailAST catch block throw parent token
* @param caughtExceptionVariableName list containing the exception tokens
* @return list of wrapped exception tokens
*/
private List<String> getWrappedExceptions(DetailAST currentCatchAST, DetailAST detailAST,
String caughtExceptionVariableName) {
final List<String> wrappedExceptionNames = new LinkedList<>();
for (DetailAST currentNode : getChildrenNodes(detailAST)) {
if (currentNode.getType() == TokenTypes.IDENT
&& currentNode.getText().equals(caughtExceptionVariableName)) {
getWrappedExceptionVariable(currentCatchAST, wrappedExceptionNames, currentNode);
}
if (currentNode.getFirstChild() != null) {
wrappedExceptionNames.addAll(
getWrappedExceptions(currentCatchAST, currentNode, caughtExceptionVariableName));
}
}
return wrappedExceptionNames;
}
/**
* Returns the wrapped exception variable name
*/
private void getWrappedExceptionVariable(DetailAST currentCatchBlock, List<String> wrappedExceptionNames,
DetailAST currentToken) {
DetailAST temp = currentToken;
while (!temp.equals(currentCatchBlock) && temp.getType() != TokenTypes.ASSIGN) {
temp = temp.getParent();
}
if (temp.getType() == TokenTypes.ASSIGN) {
final DetailAST wrappedException;
if (temp.getParent().getType() == TokenTypes.VARIABLE_DEF) {
wrappedException = temp.getParent().findFirstToken(TokenTypes.IDENT);
} else if (temp.findFirstToken(TokenTypes.DOT) != null) {
wrappedException = temp.findFirstToken(TokenTypes.DOT).findFirstToken(TokenTypes.IDENT);
} else {
wrappedException = temp.findFirstToken(TokenTypes.IDENT);
}
if (wrappedException != null) {
wrappedExceptionNames.add(wrappedException.getText());
}
}
}
/**
* Returns the parameter names for current throw keyword.
*
* @param throwParent The parent throw token
* @param paramNames The list containing the parameter names
* @return list of throw param names
*/
private List<String> getThrowParamNames(DetailAST throwParent, List<String> paramNames) {
getChildrenNodes(throwParent).forEach(currentNode -> {
if (currentNode.getType() == TokenTypes.IDENT) {
paramNames.add(currentNode.getText());
}
if (currentNode.getFirstChild() != null) {
getThrowParamNames(currentNode, paramNames);
}
});
return paramNames;
}
/**
* Recursive method that searches for all the LITERAL_THROW on the current catch token.
*
* @param catchBlockToken A start token.
* @return list of throw tokens
*/
private List<DetailAST> getThrowStatements(DetailAST catchBlockToken) {
final List<DetailAST> throwStatements = new LinkedList<>();
getChildrenNodes(catchBlockToken).forEach(currentNode -> {
if (TokenTypes.LITERAL_THROW == currentNode.getType()) {
throwStatements.add(currentNode);
}
if (currentNode.getFirstChild() != null) {
throwStatements.addAll(getThrowStatements(currentNode));
}
});
return throwStatements;
}
/**
* Gets all the children by traversing the tree generated from the current parent node.
*
* @param token parent node.
* @return List of children of the current node.
*/
private static List<DetailAST> getChildrenNodes(DetailAST token) {
final List<DetailAST> result = new LinkedList<>();
DetailAST currNode = token.getFirstChild();
while (currNode != null) {
result.add(currNode);
currNode = currNode.getNextSibling();
}
return result;
}
} |
Should also check that the endpoint is valid in the setter [Example of verifying endpoint](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/SearchClientBuilder.java#L215) | public SipRoutingClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
} | this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); | public SipRoutingClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
}
this.endpoint = endpoint;
return this;
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline pipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code endpoint} is {@code null}.
*/
@Override
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code clientOptions} is {@code null}.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingClient(adminClientImpl);
}
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
UserAgentPolicy createUserAgentPolicy(
String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
HttpPipelinePolicy createRequestIdPolicy() {
return new RequestIdPolicy();
}
CookiePolicy createCookiePolicy() {
return new CookiePolicy();
}
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
return new HttpLoggingPolicy(httpLogOptions);
}
HttpLogOptions createDefaultHttpLogOptions() {
return new HttpLogOptions();
}
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
private HttpPipeline createHttpPipeline() {
if (this.pipeline != null) {
return this.pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policyList.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policyList.add(this.createRequestIdPolicy());
policyList.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policyList.add(this.createAuthenticationPolicy());
policyList.add(this.createCookiePolicy());
if (this.additionalPolicies.size() > 0) {
policyList.addAll(this.additionalPolicies);
}
policyList.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policyList.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or it cannot be parsed into a valid URL.
*/
@Override
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
 * Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the service version that will be used will be the latest known service
 * version based on the version of the client library being used. If no service version is specified, updating to a
 * newer version of the client library will have the result of potentially moving to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
 * @return The updated {@link SipRoutingClientBuilder} object.
 */
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
    // A null version is tolerated; buildClient()/buildAsyncClient() only log it when non-null.
    this.version = version;
    return this;
}
/**
 * Sets the {@link RetryPolicy} that is used when each request is sent.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
 *
 * @param retryPolicy User's retry policy applied to each request.
 * @return The updated {@link SipRoutingClientBuilder} object.
 */
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
    // Mutual exclusivity with retryOptions is enforced later, inside
    // ClientBuilderUtil.validateAndGetRetryPolicy, when the pipeline is assembled.
    this.retryPolicy = retryPolicy;
    return this;
}
/**
 * Sets the {@link RetryOptions} for all the requests made through the client.
 *
 * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
 * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
 * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
 * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
 * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
 * documentation of types that implement this trait to understand the full set of implications.</p>
 * <p>
 * Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}.
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return The updated {@link SipRoutingClientBuilder} object.
 */
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
    // Conflict with retryPolicy is detected later by ClientBuilderUtil.validateAndGetRetryPolicy.
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Create synchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional HttpPolicies specified by additionalPolicies will be applied after them.
 *
 * @return {@link SipRoutingClient} instance.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryPolicy(RetryPolicy)} have been set.
 * @throws NullPointerException If {@code endpoint} has not been configured.
 */
public SipRoutingClient buildClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Fix: trailing space so the version is not fused to the message text in logs.
        logger.info("Build client for service version " + this.version.getVersion());
    }
    return createClientImpl(createAdminClientImpl());
}
/**
 * Create asynchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional HttpPolicies specified by additionalPolicies will be applied after them.
 *
 * @return {@link SipRoutingAsyncClient} instance.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryPolicy(RetryPolicy)} have been set.
 * @throws NullPointerException If {@code endpoint} has not been configured.
 */
public SipRoutingAsyncClient buildAsyncClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Fix: trailing space so the version is not fused to the message text in logs.
        logger.info("Build client for service version " + this.version.getVersion());
    }
    return createAsyncClientImpl(createAdminClientImpl());
}
// Package-private factory seam — presumably kept overridable so tests can substitute a double;
// confirm against the test sources.
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
    return new SipRoutingClient(adminClientImpl);
}
// Package-private factory seam for the async client — presumably overridable for test substitution;
// confirm against the test sources.
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
    return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
// Builds the User-Agent header policy; sdkName/sdkVersion are read from the SDK
// properties file at the call site (see createHttpPipeline).
UserAgentPolicy createUserAgentPolicy(
    String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
    return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
// Adds a client request id header to each outgoing request.
HttpPipelinePolicy createRequestIdPolicy() {
    return new RequestIdPolicy();
}
// Replays cookies returned by the service on subsequent requests.
CookiePolicy createCookiePolicy() {
    return new CookiePolicy();
}
// Wraps the effective log options into the pipeline's HTTP logging policy.
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
    return new HttpLoggingPolicy(httpLogOptions);
}
// Default log options used when the caller supplied none (see getHttpLogOptions).
HttpLogOptions createDefaultHttpLogOptions() {
    return new HttpLogOptions();
}
// Fails fast before building when mandatory configuration is missing.
private void validateRequiredFields() {
    // Message added so the NPE names the missing setting, matching the style of the
    // "'x' cannot be null." messages used by the setters in this builder.
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
}
// Assembles the generated admin client over the effective HTTP pipeline
// (caller-supplied pipeline if set, otherwise one built from this builder's settings).
private SipRoutingAdminClientImpl createAdminClientImpl() {
    return new SipRoutingAdminClientImplBuilder()
        .endpoint(this.endpoint)
        .pipeline(this.createHttpPipeline())
        .buildClient();
}
// Builds the HTTP pipeline. Policy order matters: user-agent/request-id and per-call
// policies run once per logical request, before the retry policy; auth, cookies and
// per-retry policies run on every (re)attempt.
private HttpPipeline createHttpPipeline() {
    // A caller-supplied pipeline takes precedence over everything else on the builder.
    if (this.httpPipeline != null) {
        return this.httpPipeline;
    }
    ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    // Application id resolution prefers client options over log options (CoreUtils contract).
    String applicationId = CoreUtils.getApplicationId(buildClientOptions, buildLogOptions);
    final List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(this.createUserAgentPolicy(
        applicationId,
        PROPERTIES.get(SDK_NAME),
        PROPERTIES.get(SDK_VERSION),
        this.configuration
    ));
    policies.add(this.createRequestIdPolicy());
    // Custom per-call policies, then core "before retry" policies.
    policies.addAll(perCallPolicies);
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    // Throws IllegalStateException when both retryPolicy and retryOptions are set.
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
    policies.add(this.createAuthenticationPolicy());
    policies.add(this.createCookiePolicy());
    policies.addAll(perRetryPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    // Logging is last; getHttpLogOptions() lazily caches a default when none was set.
    policies.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(this.httpClient)
        // NOTE(review): passes the raw (possibly null) clientOptions instead of
        // buildClientOptions — confirm HttpPipelineBuilder tolerates null here.
        .clientOptions(clientOptions)
        .build();
}
// Lazily creates and caches default HttpLogOptions when none was supplied.
// Note the side effect: the first call mutates the httpLogOptions field.
private HttpLogOptions getHttpLogOptions() {
    if (this.httpLogOptions == null) {
        this.httpLogOptions = this.createDefaultHttpLogOptions();
    }
    return this.httpLogOptions;
}
} |
Same thing as HttpPipeline being set to null after being non-null | public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
} | this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null."); | public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline pipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code endpoint} is {@code null}.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code clientOptions} is {@code null}.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
 * Create synchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional HttpPolicies specified by additionalPolicies will be applied after them.
 *
 * @return {@link SipRoutingClient} instance.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryPolicy(RetryPolicy)} have been set.
 * @throws NullPointerException If {@code endpoint} has not been configured.
 */
public SipRoutingClient buildClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Fix: trailing space so the version is not fused to the message text in logs.
        logger.info("Build client for service version " + this.version.getVersion());
    }
    return createClientImpl(createAdminClientImpl());
}
/**
 * Create asynchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional HttpPolicies specified by additionalPolicies will be applied after them.
 *
 * @return {@link SipRoutingAsyncClient} instance.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryPolicy(RetryPolicy)} have been set.
 * @throws NullPointerException If {@code endpoint} has not been configured.
 */
public SipRoutingAsyncClient buildAsyncClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Fix: trailing space so the version is not fused to the message text in logs.
        logger.info("Build client for service version " + this.version.getVersion());
    }
    return createAsyncClientImpl(createAdminClientImpl());
}
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingClient(adminClientImpl);
}
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
UserAgentPolicy createUserAgentPolicy(
String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
HttpPipelinePolicy createRequestIdPolicy() {
return new RequestIdPolicy();
}
CookiePolicy createCookiePolicy() {
return new CookiePolicy();
}
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
return new HttpLoggingPolicy(httpLogOptions);
}
HttpLogOptions createDefaultHttpLogOptions() {
return new HttpLogOptions();
}
// Fails fast before building when mandatory configuration is missing.
private void validateRequiredFields() {
    // Message added so the NPE names the missing setting, matching the style of the
    // "'x' cannot be null." messages used by the setters in this builder.
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
}
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
private HttpPipeline createHttpPipeline() {
if (this.pipeline != null) {
return this.pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policyList.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policyList.add(this.createRequestIdPolicy());
policyList.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policyList.add(this.createAuthenticationPolicy());
policyList.add(this.createCookiePolicy());
if (this.additionalPolicies.size() > 0) {
policyList.addAll(this.additionalPolicies);
}
policyList.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policyList.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
/**
 * Sets endpoint of the service.
 *
 * @param endpoint url of the service
 * @return The updated {@link SipRoutingClientBuilder} object.
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or it cannot be parsed into a valid URL.
 */
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
    try {
        // Parse only to validate; the URL instance itself is discarded.
        // NOTE(review): java.net.URL(String) is deprecated since JDK 20 — URI.create(...).toURL()
        // is the modern route, but swapping it would change which exception is thrown; left as-is.
        new URL(endpoint);
    } catch (MalformedURLException ex) {
        // Warning (not error) level: invalid input is a caller mistake, not a client failure.
        throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
    }
    this.endpoint = endpoint;
    return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Informational breadcrumb: resetting a previously configured pipeline to null switches
    // the builder back to constructing its own pipeline at build time.
    if (this.httpPipeline != null && httpPipeline == null) {
        logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
    Objects.requireNonNull(policy, "'policy' cannot be null.");
    // Route by declared position: PER_CALL policies run once per logical request
    // (before the retry policy); everything else runs on every retry attempt.
    if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
        perCallPolicies.add(policy);
    } else {
        perRetryPolicies.add(policy);
    }
    return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @see HttpClientOptions
*/
    @Override
    public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
        // Intentionally accepts null: client options carry no required configuration, and
        // createHttpPipeline() falls back to a fresh ClientOptions instance when this is unset.
        this.clientOptions = clientOptions;
        return this;
    }
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
    public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
        // May be set to null; buildClient()/buildAsyncClient() only log the version when one is present.
        this.version = version;
        return this;
    }
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
     * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
     *
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
    public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        // Not validated here; ClientBuilderUtil.validateAndGetRetryPolicy enforces mutual
        // exclusivity with retryOptions when the pipeline is built.
        this.retryPolicy = retryPolicy;
        return this;
    }
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
     * Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}.
     *
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
    @Override
    public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
        // Not validated here; ClientBuilderUtil.validateAndGetRetryPolicy enforces mutual
        // exclusivity with retryPolicy when the pipeline is built.
        this.retryOptions = retryOptions;
        return this;
    }
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
     * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
     * and {@link #retryPolicy(RetryPolicy)} have been set.
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
     * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
     * and {@link #retryPolicy(RetryPolicy)} have been set.
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
    // Wraps the generated admin client in the public synchronous client.
    // NOTE(review): presumably package-private so tests can override it — confirm.
    SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
        return new SipRoutingClient(adminClientImpl);
    }
    // Wraps the generated admin client in the public asynchronous client.
    // NOTE(review): presumably package-private so tests can override it — confirm.
    SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
        return new SipRoutingAsyncClient(adminClientImpl);
    }
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
    // Factory for the User-Agent policy; package-private, built from the sdk.properties values.
    UserAgentPolicy createUserAgentPolicy(
        String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
        return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
    }
    // Factory for the request-id policy, which stamps each request with a client request id.
    HttpPipelinePolicy createRequestIdPolicy() {
        return new RequestIdPolicy();
    }
    // Factory for the cookie-handling policy added after the retry policy.
    CookiePolicy createCookiePolicy() {
        return new CookiePolicy();
    }
    // Factory for the HTTP logging policy, always placed last in the pipeline.
    HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
        return new HttpLoggingPolicy(httpLogOptions);
    }
    // Default log options used when the caller never supplied any via httpLogOptions(...).
    HttpLogOptions createDefaultHttpLogOptions() {
        return new HttpLogOptions();
    }
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
    // Builds the generated implementation client with the configured endpoint and pipeline.
    private SipRoutingAdminClientImpl createAdminClientImpl() {
        return new SipRoutingAdminClientImplBuilder()
            .endpoint(this.endpoint)
            .pipeline(this.createHttpPipeline())
            .buildClient();
    }
private HttpPipeline createHttpPipeline() {
if (this.httpPipeline != null) {
return this.httpPipeline;
}
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = CoreUtils.getApplicationId(buildClientOptions, buildLogOptions);
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policies.add(this.createRequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policies.add(this.createAuthenticationPolicy());
policies.add(this.createCookiePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} |
`clientOptions` should be allowed to be null as they don't carry any required configuration | public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
} | this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); | public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} |
`HttpPipelinePolicy`s have a concepts of both before retry and after retry that will need to be handled here and during building of the client. [Example of handling during `addPolicy`](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/SearchClientBuilder.java#L336) [Example of per-call policy adding](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/implementation/util/Utility.java#L131) [Example of per-request and retry policy adding](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/implementation/util/Utility.java#L150) This allows for finer grain handling when constructing a client as you may have a custom policy the keeps track of all network request made which needs to be added per-request/retry and a custom policy of number of client APIs called that should only happen once per client method call (or a few times depending on if the client API makes multiple calls). This made up example would be helpful in troubleshooting networking issues if you're seeing 4 network requests per client API call. | public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
} | this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null.")); | public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline pipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code endpoint} is {@code null}.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code clientOptions} is {@code null}.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingClient(adminClientImpl);
}
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
UserAgentPolicy createUserAgentPolicy(
String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
HttpPipelinePolicy createRequestIdPolicy() {
return new RequestIdPolicy();
}
CookiePolicy createCookiePolicy() {
return new CookiePolicy();
}
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
return new HttpLoggingPolicy(httpLogOptions);
}
HttpLogOptions createDefaultHttpLogOptions() {
return new HttpLogOptions();
}
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
private HttpPipeline createHttpPipeline() {
if (this.pipeline != null) {
return this.pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policyList.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policyList.add(this.createRequestIdPolicy());
policyList.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policyList.add(this.createAuthenticationPolicy());
policyList.add(this.createCookiePolicy());
if (this.additionalPolicies.size() > 0) {
policyList.addAll(this.additionalPolicies);
}
policyList.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policyList.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or it cannot be parsed into a valid URL.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
    /**
     * Sets the configuration object used to retrieve environment configuration values during building of the client.
     *
     * <p>The stored configuration is handed to the user agent policy when the HTTP pipeline is constructed
     * internally.</p>
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return The updated {@link SipRoutingClientBuilder} object.
     */
    @Override
    public SipRoutingClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
    /**
     * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
     * recommended that this method be called with an instance of the {@link HttpClientOptions}
     * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
     * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
     * interface.
     *
     * <p>When the pipeline is built internally, these options are consulted for an application ID and passed to the
     * pipeline builder.</p>
     *
     * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
     * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
     * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
     * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
     * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
     * documentation of types that implement this trait to understand the full set of implications.</p>
     *
     * @param clientOptions A configured instance of {@link HttpClientOptions}.
     * @return The updated {@link SipRoutingClientBuilder} object.
     * @see HttpClientOptions
     */
    @Override
    public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }
    /**
     * Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
     * <p>
     * If a service version is not provided, the service version that will be used will be the latest known service
     * version based on the version of the client library being used. If no service version is specified, updating to a
     * newer version of the client library will have the result of potentially moving to a newer service version.
     * <p>
     * Targeting a specific service version may also mean that the service will return an error for newer APIs.
     *
     * @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
     * @return The updated {@link SipRoutingClientBuilder} object.
     */
    // NOTE(review): in this view the version is only logged in buildClient/buildAsyncClient and is not
    // forwarded to SipRoutingAdminClientImplBuilder — confirm how the api-version reaches requests.
    public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
        this.version = version;
        return this;
    }
    /**
     * Sets the {@link RetryPolicy} that is used when each request is sent.
     * <p>
     * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
     *
     * @param retryPolicy User's retry policy applied to each request.
     * @return The updated {@link SipRoutingClientBuilder} object.
     */
    public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }
    /**
     * Sets the {@link RetryOptions} for all the requests made through the client.
     *
     * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
     * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
     * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
     * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
     * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
     * documentation of types that implement this trait to understand the full set of implications.</p>
     * <p>
     * Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}.
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
     * @return The updated {@link SipRoutingClientBuilder} object.
     */
    @Override
    public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
    // Package-private factory seam for the sync client — presumably overridden in tests; confirm usage.
    SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
        return new SipRoutingClient(adminClientImpl);
    }
    // Package-private factory seam for the async client — presumably overridden in tests; confirm usage.
    SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
        return new SipRoutingAsyncClient(adminClientImpl);
    }
    // Chooses the authentication policy from whichever credential was configured.
    // Exactly one of tokenCredential / azureKeyCredential may be set; both set or neither set is an error.
    HttpPipelinePolicy createAuthenticationPolicy() {
        if (this.tokenCredential != null && this.azureKeyCredential != null) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
        }
        if (this.tokenCredential != null) {
            // NOTE(review): the token scope literal is truncated in this copy of the file ("https:");
            // verify the full scope URL against the original source before relying on this line.
            return new BearerTokenAuthenticationPolicy(
                this.tokenCredential, "https:
        } else if (this.azureKeyCredential != null) {
            return new HmacAuthenticationPolicy(this.azureKeyCredential);
        } else {
            throw logger.logExceptionAsError(
                new NullPointerException("Missing credential information while building a client."));
        }
    }
    // Builds the User-Agent policy from the application ID, SDK name/version and configuration.
    UserAgentPolicy createUserAgentPolicy(
        String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
        return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
    }
    // Factory for the request-id policy; seam for overriding in tests.
    HttpPipelinePolicy createRequestIdPolicy() {
        return new RequestIdPolicy();
    }
    // Factory for the cookie policy; seam for overriding in tests.
    CookiePolicy createCookiePolicy() {
        return new CookiePolicy();
    }
    // Factory for the HTTP logging policy built from the effective log options.
    HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
        return new HttpLoggingPolicy(httpLogOptions);
    }
    // Supplies the default (empty) log options used when none were configured.
    HttpLogOptions createDefaultHttpLogOptions() {
        return new HttpLogOptions();
    }
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
    // Builds the generated implementation client over the configured (or newly constructed) pipeline.
    // NOTE(review): the selected service version is not passed here in this view — confirm how the
    // api-version reaches SipRoutingAdminClientImplBuilder.
    private SipRoutingAdminClientImpl createAdminClientImpl() {
        return new SipRoutingAdminClientImplBuilder()
            .endpoint(this.endpoint)
            .pipeline(this.createHttpPipeline())
            .buildClient();
    }
private HttpPipeline createHttpPipeline() {
if (this.httpPipeline != null) {
return this.httpPipeline;
}
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = CoreUtils.getApplicationId(buildClientOptions, buildLogOptions);
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policies.add(this.createRequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policies.add(this.createAuthenticationPolicy());
policies.add(this.createCookiePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
    // Lazily defaults httpLogOptions (caching the default on the field) before pipeline construction.
    private HttpLogOptions getHttpLogOptions() {
        if (this.httpLogOptions == null) {
            this.httpLogOptions = this.createDefaultHttpLogOptions();
        }
        return this.httpLogOptions;
    }
} |
You may want to use a newer version of Autorest — the Swagger README shows `4.0.40` being used. Newer versions offer cleaner code generation (this whole null-check block is replaced with `Mono.justOrEmpty(res.getValue())`), bug fixes, and performance optimizations. The last version in `4.0.x` is `4.0.62` and the newest release is `4.1.2`. | public Mono<SipConfiguration> getSipConfigurationAsync(Context context) {
return getSipConfigurationWithResponseAsync(context)
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
} | } | public Mono<SipConfiguration> getSipConfigurationAsync(Context context) {
return getSipConfigurationWithResponseAsync(context).flatMap(res -> Mono.justOrEmpty(res.getValue()));
} | class SipRoutingAdminClientImpl {
/** The proxy service used to perform REST calls. */
private final SipRoutingAdminClientService service;
/** The communication resource, for example https:
private final String endpoint;
    /**
     * Gets the communication resource endpoint this client targets.
     *
     * @return the endpoint value.
     */
    public String getEndpoint() {
        return this.endpoint;
    }
/** Api Version. */
private final String apiVersion;
    /**
     * Gets the api-version sent as a query parameter on every request.
     *
     * @return the apiVersion value.
     */
    public String getApiVersion() {
        return this.apiVersion;
    }
/** The HTTP pipeline to send requests through. */
private final HttpPipeline httpPipeline;
    /**
     * Gets the HTTP pipeline all requests are sent through.
     *
     * @return the httpPipeline value.
     */
    public HttpPipeline getHttpPipeline() {
        return this.httpPipeline;
    }
/** The serializer to serialize an object into a string. */
private final SerializerAdapter serializerAdapter;
    /**
     * Gets the serializer used to convert objects to and from their wire format.
     *
     * @return the serializerAdapter value.
     */
    public SerializerAdapter getSerializerAdapter() {
        return this.serializerAdapter;
    }
    /**
     * Initializes an instance of SipRoutingAdminClient client with a default pipeline
     * (user agent + retry + cookie policies) and the default Jackson serializer adapter.
     *
     * @param endpoint The communication resource endpoint.
     * @param apiVersion Api Version.
     */
    SipRoutingAdminClientImpl(String endpoint, String apiVersion) {
        this(
            new HttpPipelineBuilder()
                .policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy())
                .build(),
            JacksonAdapter.createDefaultSerializerAdapter(),
            endpoint,
            apiVersion);
    }
    /**
     * Initializes an instance of SipRoutingAdminClient client with the given pipeline and the default
     * Jackson serializer adapter.
     *
     * @param httpPipeline The HTTP pipeline to send requests through.
     * @param endpoint The communication resource endpoint.
     * @param apiVersion Api Version.
     */
    SipRoutingAdminClientImpl(HttpPipeline httpPipeline, String endpoint, String apiVersion) {
        this(httpPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, apiVersion);
    }
    /**
     * Initializes an instance of SipRoutingAdminClient client.
     *
     * @param httpPipeline The HTTP pipeline to send requests through.
     * @param serializerAdapter The serializer to serialize an object into a string.
     * @param endpoint The communication resource endpoint.
     * @param apiVersion Api Version.
     */
    SipRoutingAdminClientImpl(
        HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, String endpoint, String apiVersion) {
        this.httpPipeline = httpPipeline;
        this.serializerAdapter = serializerAdapter;
        this.endpoint = endpoint;
        this.apiVersion = apiVersion;
        // RestProxy wires the annotated service interface to the configured pipeline and serializer.
        this.service =
            RestProxy.create(SipRoutingAdminClientService.class, this.httpPipeline, this.getSerializerAdapter());
    }
    /**
     * The interface defining all the services for SipRoutingAdminClient to be used by the proxy service to perform REST
     * calls.
     */
    @Host("{endpoint}")
    @ServiceInterface(name = "SipRoutingAdminClien")
    private interface SipRoutingAdminClientService {
        // GET /sip — fetch the current SIP configuration.
        @Get("/sip")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
        Mono<Response<SipConfiguration>> getSipConfiguration(
            @HostParam("endpoint") String endpoint,
            @QueryParam("api-version") String apiVersion,
            @HeaderParam("Accept") String accept,
            Context context);
        // PATCH /sip — merge-patch the SIP configuration; 415/422/500 map to the same exception type.
        @Patch("/sip")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(
            value = CommunicationErrorResponseException.class,
            code = {415, 422, 500})
        @UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
        Mono<Response<SipConfiguration>> patchSipConfiguration(
            @HostParam("endpoint") String endpoint,
            @QueryParam("api-version") String apiVersion,
            @BodyParam("application/merge-patch+json") SipConfiguration body,
            @HeaderParam("Accept") String accept,
            Context context);
    }
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync() {
final String accept = "application/json";
return FluxUtil.withContext(
context -> service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context));
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync(Context context) {
final String accept = "application/json";
return service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> getSipConfigurationAsync() {
return getSipConfigurationWithResponseAsync()
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
    /**
     * Gets SIP configuration for resource.
     *
     * <p>Blocking convenience wrapper over {@link #getSipConfigurationAsync()}; do not call it on a
     * reactive (non-blocking) thread.</p>
     *
     * @throws CommunicationErrorResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return sIP configuration for resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public SipConfiguration getSipConfiguration() {
        return getSipConfigurationAsync().block();
    }
    /**
     * Gets SIP configuration for resource.
     *
     * <p>Blocking convenience wrapper over {@link #getSipConfigurationWithResponseAsync(Context)}; do not call it on
     * a reactive (non-blocking) thread.</p>
     *
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws CommunicationErrorResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return sIP configuration for resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<SipConfiguration> getSipConfigurationWithResponse(Context context) {
        return getSipConfigurationWithResponseAsync(context).block();
    }
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(SipConfiguration body) {
final String accept = "application/json";
return FluxUtil.withContext(
context ->
service.patchSipConfiguration(this.getEndpoint(), this.getApiVersion(), body, accept, context));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(
SipConfiguration body, Context context) {
final String accept = "application/json";
return service.patchSipConfiguration(this.getEndpoint(), this.getApiVersion(), body, accept, context);
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body) {
return patchSipConfigurationWithResponseAsync(body)
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body, Context context) {
return patchSipConfigurationWithResponseAsync(body, context)
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
    /**
     * Patches SIP configuration for resource.
     *
     * <p>Blocking convenience wrapper over {@link #patchSipConfigurationAsync(SipConfiguration)}; do not call it on
     * a reactive (non-blocking) thread.</p>
     *
     * @param body Configuration patch.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws CommunicationErrorResponseException thrown if the request is rejected by server.
     * @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
     *     500.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the updated SIP configuration.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public SipConfiguration patchSipConfiguration(SipConfiguration body) {
        return patchSipConfigurationAsync(body).block();
    }
    /**
     * Patches SIP configuration for resource.
     *
     * <p>Blocking convenience wrapper over
     * {@link #patchSipConfigurationWithResponseAsync(SipConfiguration, Context)}; do not call it on a reactive
     * (non-blocking) thread.</p>
     *
     * @param body Configuration patch.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws CommunicationErrorResponseException thrown if the request is rejected by server.
     * @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
     *     500.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the updated SIP configuration along with the HTTP response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<SipConfiguration> patchSipConfigurationWithResponse(SipConfiguration body, Context context) {
        return patchSipConfigurationWithResponseAsync(body, context).block();
    }
} | class SipRoutingAdminClientImpl {
/** The proxy service used to perform REST calls. */
private final SipRoutingAdminClientService service;
/** The communication resource, for example https:
private final String endpoint;
/**
* Gets The communication resource, for example https:
*
* @return the endpoint value.
*/
public String getEndpoint() {
return this.endpoint;
}
/** Api Version. */
private final String apiVersion;
/**
* Gets Api Version.
*
* @return the apiVersion value.
*/
public String getApiVersion() {
return this.apiVersion;
}
/** The HTTP pipeline to send requests through. */
private final HttpPipeline httpPipeline;
/**
* Gets The HTTP pipeline to send requests through.
*
* @return the httpPipeline value.
*/
public HttpPipeline getHttpPipeline() {
return this.httpPipeline;
}
/** The serializer to serialize an object into a string. */
private final SerializerAdapter serializerAdapter;
/**
* Gets The serializer to serialize an object into a string.
*
* @return the serializerAdapter value.
*/
public SerializerAdapter getSerializerAdapter() {
return this.serializerAdapter;
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(String endpoint, String apiVersion) {
this(
new HttpPipelineBuilder()
.policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy())
.build(),
JacksonAdapter.createDefaultSerializerAdapter(),
endpoint,
apiVersion);
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(HttpPipeline httpPipeline, String endpoint, String apiVersion) {
this(httpPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, apiVersion);
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param serializerAdapter The serializer to serialize an object into a string.
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(
HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, String endpoint, String apiVersion) {
this.httpPipeline = httpPipeline;
this.serializerAdapter = serializerAdapter;
this.endpoint = endpoint;
this.apiVersion = apiVersion;
this.service =
RestProxy.create(SipRoutingAdminClientService.class, this.httpPipeline, this.getSerializerAdapter());
}
/**
* The interface defining all the services for SipRoutingAdminClient to be used by the proxy service to perform REST
* calls.
*/
@Host("{endpoint}")
@ServiceInterface(name = "SipRoutingAdminClien")
public interface SipRoutingAdminClientService {
@Get("/sip")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
Mono<Response<SipConfiguration>> getSipConfiguration(
@HostParam("endpoint") String endpoint,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Patch("/sip")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(
value = CommunicationErrorResponseException.class,
code = {500, 422, 415})
@UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
Mono<Response<SipConfiguration>> patchSipConfiguration(
@HostParam("endpoint") String endpoint,
@QueryParam("api-version") String apiVersion,
@BodyParam("application/merge-patch+json") SipConfiguration body,
@HeaderParam("Accept") String accept,
Context context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource along with {@link Response} on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync() {
final String accept = "application/json";
return FluxUtil.withContext(
context -> service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context));
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource along with {@link Response} on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync(Context context) {
final String accept = "application/json";
return service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> getSipConfigurationAsync() {
return getSipConfigurationWithResponseAsync().flatMap(res -> Mono.justOrEmpty(res.getValue()));
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public SipConfiguration getSipConfiguration() {
return getSipConfigurationAsync().block();
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource along with {@link Response}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<SipConfiguration> getSipConfigurationWithResponse(Context context) {
return getSipConfigurationWithResponseAsync(context).block();
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list along with {@link Response} on successful
* completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(SipConfiguration body) {
    final String accept = "application/json";
    // Capture the subscriber's reactor Context and forward it to the service call.
    return FluxUtil.withContext(context -> {
        return service.patchSipConfiguration(this.getEndpoint(), this.getApiVersion(), body, accept, context);
    });
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list along with {@link Response} on successful
* completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(
        SipConfiguration body, Context context) {
    // Context is supplied explicitly by the caller, so no reactor-context capture is needed here.
    final String accept = "application/json";
    return service.patchSipConfiguration(getEndpoint(), getApiVersion(), body, accept, context);
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body) {
    // Unwrap the Response, completing empty when the service returned no value.
    return patchSipConfigurationWithResponseAsync(body)
        .flatMap(response -> Mono.justOrEmpty(response.getValue()));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body, Context context) {
    // Same as the single-argument overload, but with an explicit operation context.
    return patchSipConfigurationWithResponseAsync(body, context)
        .flatMap(response -> Mono.justOrEmpty(response.getValue()));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public SipConfiguration patchSipConfiguration(SipConfiguration body) {
    // Synchronous convenience wrapper over the async patch.
    SipConfiguration result = patchSipConfigurationAsync(body).block();
    return result;
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list along with {@link Response}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<SipConfiguration> patchSipConfigurationWithResponse(SipConfiguration body, Context context) {
    // Block on the async call so callers get the full Response synchronously.
    Response<SipConfiguration> response = patchSipConfigurationWithResponseAsync(body, context).block();
    return response;
}
} |
Check implemented. https://github.com/Azure/azure-sdk-for-java/pull/26819/commits/2ccd2e7dff1b951ba7a23bf6b184308728e1b240 | public SipRoutingClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
} | this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); | public SipRoutingClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
}
this.endpoint = endpoint;
return this;
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline pipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code endpoint} is {@code null}.
*/
@Override
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder#endpoint(String) endpoint} are ignored.
*
* @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline pipeline) {
    // When set, this pipeline is returned directly by createHttpPipeline(), bypassing all other HTTP settings.
    this.pipeline = pipeline;
    return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
    // Used when the pipeline is assembled internally; ignored if an explicit pipeline is set.
    this.httpClient = httpClient;
    return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
    // Null is tolerated: getHttpLogOptions() lazily substitutes defaults at build time.
    this.httpLogOptions = httpLogOptions;
    return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
    // HMAC key credential; mutually exclusive with the TokenCredential (enforced in createAuthenticationPolicy).
    this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
    return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
    // AAD token credential; mutually exclusive with the AzureKeyCredential (enforced in createAuthenticationPolicy).
    this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
    return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    // Parse the "endpoint=...;accesskey=..." pair and apply both parts through the public setters.
    CommunicationConnectionString parsed = new CommunicationConnectionString(connectionString);
    endpoint(parsed.getEndpoint());
    credential(new AzureKeyCredential(parsed.getAccessKey()));
    return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
    // Forwarded to the user-agent policy when the pipeline is built.
    this.configuration = configuration;
    return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
    // Validate first, then append; applied after the built-in policies when the pipeline is assembled.
    Objects.requireNonNull(policy, "'policy' cannot be null.");
    this.additionalPolicies.add(policy);
    return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code clientOptions} is {@code null}.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
    // Application id from these options takes precedence over HttpLogOptions when building the pipeline.
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
    // Overrides the default (latest) service version; may be set to null to fall back to defaults.
    this.version = version;
    return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
    // Mutual exclusivity with retryOptions is validated later by ClientBuilderUtil.validateAndGetRetryPolicy.
    this.retryPolicy = retryPolicy;
    return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}.
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
    // Mutual exclusivity with retryPolicy is validated later by ClientBuilderUtil.validateAndGetRetryPolicy.
    this.retryOptions = retryOptions;
    return this;
}
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
* and {@link #retryPolicy(RetryPolicy)} have been set.
*/
public SipRoutingClient buildClient() {
    // Fails fast if required configuration (endpoint) is missing.
    validateRequiredFields();
    if (this.version != null) {
        // Fix: the original concatenation lacked a separating space before the version value;
        // use ClientLogger's parameterized form instead of string concatenation.
        logger.info("Build client for service version {}", this.version.getVersion());
    }
    return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
* and {@link #retryPolicy(RetryPolicy)} have been set.
*/
public SipRoutingAsyncClient buildAsyncClient() {
    // Fails fast if required configuration (endpoint) is missing.
    validateRequiredFields();
    if (this.version != null) {
        // Fix: the original concatenation lacked a separating space before the version value;
        // use ClientLogger's parameterized form instead of string concatenation.
        logger.info("Build client for service version {}", this.version.getVersion());
    }
    return createAsyncClientImpl(createAdminClientImpl());
}
// Package-private factory hook; overridable in tests to substitute the sync client.
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
    return new SipRoutingClient(adminClientImpl);
}
// Package-private factory hook; overridable in tests to substitute the async client.
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
    return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
// Factory hook for the user-agent policy; sdkName/sdkVersion come from the properties file.
UserAgentPolicy createUserAgentPolicy(
    String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
    return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
// Factory hook for the request-id policy (adds a client request id header per request).
HttpPipelinePolicy createRequestIdPolicy() {
    return new RequestIdPolicy();
}
// Factory hook for the cookie policy.
CookiePolicy createCookiePolicy() {
    return new CookiePolicy();
}
// Factory hook for the HTTP logging policy; placed last in the pipeline.
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
    return new HttpLoggingPolicy(httpLogOptions);
}
// Default log options used when the caller never configured httpLogOptions.
HttpLogOptions createDefaultHttpLogOptions() {
    return new HttpLogOptions();
}
// Verifies all required builder state before constructing a client.
// Still throws NullPointerException, so caller-visible behavior is unchanged —
// but now with a descriptive message instead of a bare NPE.
private void validateRequiredFields() {
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
}
// Builds the generated admin-client impl with the configured endpoint and assembled pipeline.
private SipRoutingAdminClientImpl createAdminClientImpl() {
    return new SipRoutingAdminClientImplBuilder()
        .endpoint(this.endpoint)
        .pipeline(this.createHttpPipeline())
        .buildClient();
}
// Assembles the HTTP pipeline. An explicitly configured pipeline takes precedence over
// every other HTTP setting; otherwise policies are ordered: user-agent, request-id,
// retry, authentication, cookies, user-supplied policies, logging.
private HttpPipeline createHttpPipeline() {
    if (this.pipeline != null) {
        return this.pipeline;
    }
    List<HttpPipelinePolicy> policyList = new ArrayList<>();
    ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    // Application id preference: ClientOptions first, then HttpLogOptions.
    String applicationId = null;
    if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
        applicationId = buildClientOptions.getApplicationId();
    } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
        applicationId = buildLogOptions.getApplicationId();
    }
    policyList.add(this.createUserAgentPolicy(
        applicationId,
        PROPERTIES.get(SDK_NAME),
        PROPERTIES.get(SDK_VERSION),
        this.configuration
    ));
    policyList.add(this.createRequestIdPolicy());
    // Validates that retryPolicy and retryOptions are not both set, and picks the effective policy.
    policyList.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
    policyList.add(this.createAuthenticationPolicy());
    policyList.add(this.createCookiePolicy());
    // Idiom fix: isEmpty() instead of size() > 0.
    if (!this.additionalPolicies.isEmpty()) {
        policyList.addAll(this.additionalPolicies);
    }
    policyList.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policyList.toArray(new HttpPipelinePolicy[0]))
        .httpClient(this.httpClient)
        // Consistency fix: pass the defaulted options computed above rather than the raw
        // (possibly null) field, matching how applicationId was resolved.
        .clientOptions(buildClientOptions)
        .build();
}
// Lazily initializes httpLogOptions with defaults when the caller never set it.
private HttpLogOptions getHttpLogOptions() {
    if (this.httpLogOptions == null) {
        this.httpLogOptions = this.createDefaultHttpLogOptions();
    }
    return this.httpLogOptions;
}
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or it cannot be parsed into a valid URL.
*/
@Override
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Log when a previously configured pipeline is being cleared, as that re-enables
    // the other HTTP settings at build time.
    boolean clearingExisting = (httpPipeline == null) && (this.httpPipeline != null);
    if (clearingExisting) {
        logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
    // Note when an existing client is being cleared back to null.
    if (httpClient == null && this.httpClient != null) {
        logger.info("HttpClient is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
    // Null is accepted; presumably defaults are substituted at build time — confirm in pipeline assembly.
    this.httpLogOptions = httpLogOptions;
    return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
    // HMAC key credential for authenticating requests.
    this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
    return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
    // AAD token credential for authenticating requests.
    this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
    return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    // Parse the "endpoint=...;accesskey=..." pair and apply both parts through the public setters.
    CommunicationConnectionString parsed = new CommunicationConnectionString(connectionString);
    endpoint(parsed.getEndpoint());
    credential(new AzureKeyCredential(parsed.getAccessKey()));
    return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
    // Stored for use when environment-driven settings are resolved at build time.
    this.configuration = configuration;
    return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
    Objects.requireNonNull(policy, "'policy' cannot be null.");
    // Route the policy into the list that matches its declared pipeline position.
    List<HttpPipelinePolicy> target =
        (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) ? perCallPolicies : perRetryPolicies;
    target.add(policy);
    return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}. May be {@code null} to clear any
* previously configured options.
* @return The updated {@link SipRoutingClientBuilder} object.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
// Stored as-is (no null check), so passing null clears a previously set value.
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
// Stored as-is; buildClient()/buildAsyncClient() guard against null before logging it.
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
// Stored as-is; the pair (retryPolicy, retryOptions) is validated together in
// createHttpPipeline() via ClientBuilderUtil.validateAndGetRetryPolicy.
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}.
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
// Stored as-is; validated against retryPolicy when the pipeline is built.
this.retryOptions = retryOptions;
return this;
}
/**
 * Create synchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional policies registered via {@link #addPolicy(HttpPipelinePolicy)} are applied after them.
 *
 * @return {@link SipRoutingClient} instance.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryPolicy(RetryPolicy)} have been set.
 * @throws NullPointerException If {@code endpoint} has not been set.
 */
public SipRoutingClient buildClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Parameterized logging defers formatting and fixes the missing separator
        // in the original message ("...service version1.0").
        logger.info("Build client for service version {}", this.version.getVersion());
    }
    return createClientImpl(createAdminClientImpl());
}
/**
 * Create asynchronous client applying CommunicationClientCredentialPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy.
 * Additional policies registered via {@link #addPolicy(HttpPipelinePolicy)} are applied after them.
 *
 * @return {@link SipRoutingAsyncClient} instance.
 * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)}
 * and {@link #retryPolicy(RetryPolicy)} have been set.
 * @throws NullPointerException If {@code endpoint} has not been set.
 */
public SipRoutingAsyncClient buildAsyncClient() {
    validateRequiredFields();
    if (this.version != null) {
        // Parameterized logging defers formatting and fixes the missing separator
        // in the original message ("...service version1.0").
        logger.info("Build client for service version {}", this.version.getVersion());
    }
    return createAsyncClientImpl(createAdminClientImpl());
}
// Package-private factory for the synchronous client wrapper — presumably a seam
// so tests can substitute the client; confirm against the test sources.
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingClient(adminClientImpl);
}
// Package-private factory for the asynchronous client wrapper — presumably a seam
// so tests can substitute the client; confirm against the test sources.
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingAsyncClient(adminClientImpl);
}
// Builds the authentication policy from whichever credential was configured.
// Exactly one of tokenCredential / azureKeyCredential may be set: both set is an
// error, neither set is an error.
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
// NOTE(review): the scope argument below appears truncated in this snapshot
// ("https:"); confirm the full scope URL against the original source file.
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
// Key-based auth uses HMAC request signing.
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
// Factory for the User-Agent policy built from the application id and SDK
// name/version properties; package-private, overridable.
UserAgentPolicy createUserAgentPolicy(
String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
// Factory for the policy that stamps each request with a request id header.
HttpPipelinePolicy createRequestIdPolicy() {
return new RequestIdPolicy();
}
// Factory for the cookie-handling policy added to the pipeline.
CookiePolicy createCookiePolicy() {
return new CookiePolicy();
}
// Factory for the HTTP logging policy configured with the given log options.
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
return new HttpLoggingPolicy(httpLogOptions);
}
// Supplies the default (empty) log options used when none were configured.
HttpLogOptions createDefaultHttpLogOptions() {
return new HttpLogOptions();
}
/**
 * Verifies that all required builder state has been supplied before a client is built.
 *
 * @throws NullPointerException If {@code endpoint} has not been set.
 */
private void validateRequiredFields() {
    // Include a message (matching this file's "'x' cannot be null." convention) so
    // the failure names the missing field instead of raising a bare NPE.
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
}
// Builds the generated admin-client implementation backed by the configured
// endpoint and a freshly assembled (or user-supplied) HTTP pipeline.
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
/**
 * Assembles the HTTP pipeline used by the clients. A user-supplied pipeline, if
 * any, takes precedence over every other builder setting.
 */
private HttpPipeline createHttpPipeline() {
    if (this.httpPipeline != null) {
        return this.httpPipeline;
    }
    // Default to empty options so application-id resolution and the pipeline
    // builder never see null.
    ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    String applicationId = CoreUtils.getApplicationId(buildClientOptions, buildLogOptions);
    // Policy order matters: user agent and request id first, per-call policies,
    // then retry, auth, cookies and per-retry policies, with logging last.
    final List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(this.createUserAgentPolicy(
        applicationId,
        PROPERTIES.get(SDK_NAME),
        PROPERTIES.get(SDK_VERSION),
        this.configuration
    ));
    policies.add(this.createRequestIdPolicy());
    policies.addAll(perCallPolicies);
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
    policies.add(this.createAuthenticationPolicy());
    policies.add(this.createCookiePolicy());
    policies.addAll(perRetryPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(this.httpClient)
        // Fix: pass the defaulted, non-null options (previously the raw, possibly
        // null field was used), consistent with the application-id resolution above.
        .clientOptions(buildClientOptions)
        .build();
}
/**
 * Returns the configured {@link HttpLogOptions}, lazily creating and caching a
 * default instance on first use.
 */
private HttpLogOptions getHttpLogOptions() {
    HttpLogOptions options = this.httpLogOptions;
    if (options == null) {
        options = this.createDefaultHttpLogOptions();
        this.httpLogOptions = options;
    }
    return options;
}
} |
Log added. https://github.com/Azure/azure-sdk-for-java/pull/26819/commits/55c237da4a6c3d1e1fa3b5492c4bb39270fabf5b | public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
} | this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null."); | public SipRoutingClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline pipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code endpoint} is {@code null}.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
this.additionalPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code clientOptions} is {@code null}.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingClient(adminClientImpl);
}
SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
return new SipRoutingAsyncClient(adminClientImpl);
}
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
UserAgentPolicy createUserAgentPolicy(
String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
}
HttpPipelinePolicy createRequestIdPolicy() {
return new RequestIdPolicy();
}
CookiePolicy createCookiePolicy() {
return new CookiePolicy();
}
HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
return new HttpLoggingPolicy(httpLogOptions);
}
HttpLogOptions createDefaultHttpLogOptions() {
return new HttpLogOptions();
}
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
private HttpPipeline createHttpPipeline() {
if (this.pipeline != null) {
return this.pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
policyList.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policyList.add(this.createRequestIdPolicy());
policyList.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policyList.add(this.createAuthenticationPolicy());
policyList.add(this.createCookiePolicy());
if (this.additionalPolicies.size() > 0) {
policyList.addAll(this.additionalPolicies);
}
policyList.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policyList.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} | class SipRoutingClientBuilder implements
AzureKeyCredentialTrait<SipRoutingClientBuilder>,
ConfigurationTrait<SipRoutingClientBuilder>,
ConnectionStringTrait<SipRoutingClientBuilder>,
EndpointTrait<SipRoutingClientBuilder>,
HttpTrait<SipRoutingClientBuilder>,
TokenCredentialTrait<SipRoutingClientBuilder> {
private static final String APP_CONFIG_PROPERTIES = "azure-communication-phonenumbers-siprouting.properties";
private static final Map<String, String> PROPERTIES = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final ClientLogger logger = new ClientLogger(SipRoutingClientBuilder.class);
private SipRoutingServiceVersion version = SipRoutingServiceVersion.getLatest();
private String endpoint;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private Configuration configuration;
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
/**
* Sets endpoint of the service
*
* @param endpoint url of the service
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or it cannot be parsed into a valid URL.
*/
@Override
public SipRoutingClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all other settings aside from
* {@link SipRoutingClientBuilder
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is {@code null}.
*/
@Override
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is {@code null}.
*/
@Override
public SipRoutingClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Set the endpoint and AzureKeyCredential for authorization.
*
* @param connectionString connection string in the format "endpoint={endpoint_value};accesskey={accesskey_value}"
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is {@code null}.
*/
@Override
public SipRoutingClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
this
.endpoint(endpoint)
.credential(new AzureKeyCredential(accessKey));
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
@Override
public SipRoutingClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
* @return The updated {@link SipRoutingClientBuilder} object.
* @see HttpClientOptions
*/
@Override
public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the {@link SipRoutingServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link SipRoutingServiceVersion} of the service to be used when making requests.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder serviceVersion(SipRoutingServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryPolicy User's retry policy applied to each request.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
public SipRoutingClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return The updated {@link SipRoutingClientBuilder} object.
*/
@Override
public SipRoutingClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Create synchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingClient buildClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createClientImpl(createAdminClientImpl());
}
/**
* Create asynchronous client applying CommunicationClientCredentialPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy.
* Additional HttpPolicies specified by additionalPolicies will be applied after them
*
* @return {@link SipRoutingAsyncClient} instance.
* @throws IllegalStateException If both {@link
* and {@link
*/
public SipRoutingAsyncClient buildAsyncClient() {
validateRequiredFields();
if (this.version != null) {
logger.info("Build client for service version" + this.version.getVersion());
}
return createAsyncClientImpl(createAdminClientImpl());
}
    /**
     * Wraps the generated implementation client in the public synchronous client.
     * Package-private, presumably so tests can substitute the client — TODO confirm.
     *
     * @param adminClientImpl generated implementation client to wrap.
     * @return a new {@link SipRoutingClient}.
     */
    SipRoutingClient createClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
        return new SipRoutingClient(adminClientImpl);
    }
    /**
     * Wraps the generated implementation client in the public asynchronous client.
     * Package-private, presumably so tests can substitute the client — TODO confirm.
     *
     * @param adminClientImpl generated implementation client to wrap.
     * @return a new {@link SipRoutingAsyncClient}.
     */
    SipRoutingAsyncClient createAsyncClientImpl(SipRoutingAdminClientImpl adminClientImpl) {
        return new SipRoutingAsyncClient(adminClientImpl);
    }
HttpPipelinePolicy createAuthenticationPolicy() {
if (this.tokenCredential != null && this.azureKeyCredential != null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
if (this.tokenCredential != null) {
return new BearerTokenAuthenticationPolicy(
this.tokenCredential, "https:
} else if (this.azureKeyCredential != null) {
return new HmacAuthenticationPolicy(this.azureKeyCredential);
} else {
throw logger.logExceptionAsError(
new NullPointerException("Missing credential information while building a client."));
}
}
    /**
     * Creates the {@link UserAgentPolicy} stamped onto every request.
     *
     * @param applicationId application id resolved from client/log options.
     * @param sdkName name of this SDK.
     * @param sdkVersion version of this SDK.
     * @param configuration configuration store passed to the policy.
     * @return a new {@link UserAgentPolicy}.
     */
    UserAgentPolicy createUserAgentPolicy(
        String applicationId, String sdkName, String sdkVersion, Configuration configuration) {
        return new UserAgentPolicy(applicationId, sdkName, sdkVersion, configuration);
    }
    /**
     * Creates the policy that attaches a request id to each request.
     *
     * @return a new {@link RequestIdPolicy}.
     */
    HttpPipelinePolicy createRequestIdPolicy() {
        return new RequestIdPolicy();
    }
    /**
     * Creates the cookie-handling policy for the pipeline.
     *
     * @return a new {@link CookiePolicy}.
     */
    CookiePolicy createCookiePolicy() {
        return new CookiePolicy();
    }
    /**
     * Creates the HTTP logging policy from the given log options.
     *
     * @param httpLogOptions options controlling what is logged for each request/response.
     * @return a new {@link HttpLoggingPolicy}.
     */
    HttpLoggingPolicy createHttpLoggingPolicy(HttpLogOptions httpLogOptions) {
        return new HttpLoggingPolicy(httpLogOptions);
    }
    /**
     * Creates the default {@link HttpLogOptions} used when the caller supplied none.
     *
     * @return a new default {@link HttpLogOptions}.
     */
    HttpLogOptions createDefaultHttpLogOptions() {
        return new HttpLogOptions();
    }
private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
}
private SipRoutingAdminClientImpl createAdminClientImpl() {
return new SipRoutingAdminClientImplBuilder()
.endpoint(this.endpoint)
.pipeline(this.createHttpPipeline())
.buildClient();
}
private HttpPipeline createHttpPipeline() {
if (this.httpPipeline != null) {
return this.httpPipeline;
}
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = CoreUtils.getApplicationId(buildClientOptions, buildLogOptions);
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(this.createUserAgentPolicy(
applicationId,
PROPERTIES.get(SDK_NAME),
PROPERTIES.get(SDK_VERSION),
this.configuration
));
policies.add(this.createRequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policies.add(this.createAuthenticationPolicy());
policies.add(this.createCookiePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(this.createHttpLoggingPolicy(this.getHttpLogOptions()));
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(this.httpClient)
.clientOptions(clientOptions)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (this.httpLogOptions == null) {
this.httpLogOptions = this.createDefaultHttpLogOptions();
}
return this.httpLogOptions;
}
} |
Altered https://github.com/Azure/azure-sdk-for-java/pull/26819/commits/fcbb12feb1c709eca841ca321a3b85ff746533e1 | public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
return this;
} | this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); | public SipRoutingClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} |
Upgraded to @autorest/java@4.1.3 https://github.com/Azure/azure-sdk-for-java/pull/26819/commits/723d49e7fca90431bffa3366f19bcad1d20ddfba Please verify. | public Mono<SipConfiguration> getSipConfigurationAsync(Context context) {
return getSipConfigurationWithResponseAsync(context)
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
} | } | public Mono<SipConfiguration> getSipConfigurationAsync(Context context) {
return getSipConfigurationWithResponseAsync(context).flatMap(res -> Mono.justOrEmpty(res.getValue()));
} | class SipRoutingAdminClientImpl {
/** The proxy service used to perform REST calls. */
private final SipRoutingAdminClientService service;
/** The communication resource, for example https:
private final String endpoint;
/**
* Gets The communication resource, for example https:
*
* @return the endpoint value.
*/
public String getEndpoint() {
return this.endpoint;
}
/** Api Version. */
private final String apiVersion;
/**
* Gets Api Version.
*
* @return the apiVersion value.
*/
public String getApiVersion() {
return this.apiVersion;
}
/** The HTTP pipeline to send requests through. */
private final HttpPipeline httpPipeline;
/**
* Gets The HTTP pipeline to send requests through.
*
* @return the httpPipeline value.
*/
public HttpPipeline getHttpPipeline() {
return this.httpPipeline;
}
/** The serializer to serialize an object into a string. */
private final SerializerAdapter serializerAdapter;
/**
* Gets The serializer to serialize an object into a string.
*
* @return the serializerAdapter value.
*/
public SerializerAdapter getSerializerAdapter() {
return this.serializerAdapter;
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(String endpoint, String apiVersion) {
this(
new HttpPipelineBuilder()
.policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy())
.build(),
JacksonAdapter.createDefaultSerializerAdapter(),
endpoint,
apiVersion);
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(HttpPipeline httpPipeline, String endpoint, String apiVersion) {
this(httpPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, apiVersion);
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param serializerAdapter The serializer to serialize an object into a string.
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(
HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, String endpoint, String apiVersion) {
this.httpPipeline = httpPipeline;
this.serializerAdapter = serializerAdapter;
this.endpoint = endpoint;
this.apiVersion = apiVersion;
this.service =
RestProxy.create(SipRoutingAdminClientService.class, this.httpPipeline, this.getSerializerAdapter());
}
/**
* The interface defining all the services for SipRoutingAdminClient to be used by the proxy service to perform REST
* calls.
*/
@Host("{endpoint}")
@ServiceInterface(name = "SipRoutingAdminClien")
private interface SipRoutingAdminClientService {
@Get("/sip")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
Mono<Response<SipConfiguration>> getSipConfiguration(
@HostParam("endpoint") String endpoint,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Patch("/sip")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(
value = CommunicationErrorResponseException.class,
code = {415, 422, 500})
@UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
Mono<Response<SipConfiguration>> patchSipConfiguration(
@HostParam("endpoint") String endpoint,
@QueryParam("api-version") String apiVersion,
@BodyParam("application/merge-patch+json") SipConfiguration body,
@HeaderParam("Accept") String accept,
Context context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync() {
final String accept = "application/json";
return FluxUtil.withContext(
context -> service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context));
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync(Context context) {
final String accept = "application/json";
return service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> getSipConfigurationAsync() {
return getSipConfigurationWithResponseAsync()
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public SipConfiguration getSipConfiguration() {
return getSipConfigurationAsync().block();
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<SipConfiguration> getSipConfigurationWithResponse(Context context) {
return getSipConfigurationWithResponseAsync(context).block();
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(SipConfiguration body) {
final String accept = "application/json";
return FluxUtil.withContext(
context ->
service.patchSipConfiguration(this.getEndpoint(), this.getApiVersion(), body, accept, context));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(
SipConfiguration body, Context context) {
final String accept = "application/json";
return service.patchSipConfiguration(this.getEndpoint(), this.getApiVersion(), body, accept, context);
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body) {
return patchSipConfigurationWithResponseAsync(body)
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body, Context context) {
return patchSipConfigurationWithResponseAsync(body, context)
.flatMap(
(Response<SipConfiguration> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public SipConfiguration patchSipConfiguration(SipConfiguration body) {
return patchSipConfigurationAsync(body).block();
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 415, 422,
* 500.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<SipConfiguration> patchSipConfigurationWithResponse(SipConfiguration body, Context context) {
return patchSipConfigurationWithResponseAsync(body, context).block();
}
} | class SipRoutingAdminClientImpl {
/** The proxy service used to perform REST calls. */
private final SipRoutingAdminClientService service;
/** The communication resource, for example https:
private final String endpoint;
/**
* Gets The communication resource, for example https:
*
* @return the endpoint value.
*/
public String getEndpoint() {
return this.endpoint;
}
/** Api Version. */
private final String apiVersion;
/**
* Gets Api Version.
*
* @return the apiVersion value.
*/
public String getApiVersion() {
return this.apiVersion;
}
/** The HTTP pipeline to send requests through. */
private final HttpPipeline httpPipeline;
/**
* Gets The HTTP pipeline to send requests through.
*
* @return the httpPipeline value.
*/
public HttpPipeline getHttpPipeline() {
return this.httpPipeline;
}
/** The serializer to serialize an object into a string. */
private final SerializerAdapter serializerAdapter;
/**
* Gets The serializer to serialize an object into a string.
*
* @return the serializerAdapter value.
*/
public SerializerAdapter getSerializerAdapter() {
return this.serializerAdapter;
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(String endpoint, String apiVersion) {
this(
new HttpPipelineBuilder()
.policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy())
.build(),
JacksonAdapter.createDefaultSerializerAdapter(),
endpoint,
apiVersion);
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(HttpPipeline httpPipeline, String endpoint, String apiVersion) {
this(httpPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, apiVersion);
}
/**
* Initializes an instance of SipRoutingAdminClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param serializerAdapter The serializer to serialize an object into a string.
* @param endpoint The communication resource, for example https:
* @param apiVersion Api Version.
*/
SipRoutingAdminClientImpl(
HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, String endpoint, String apiVersion) {
this.httpPipeline = httpPipeline;
this.serializerAdapter = serializerAdapter;
this.endpoint = endpoint;
this.apiVersion = apiVersion;
this.service =
RestProxy.create(SipRoutingAdminClientService.class, this.httpPipeline, this.getSerializerAdapter());
}
/**
* The interface defining all the services for SipRoutingAdminClient to be used by the proxy service to perform REST
* calls.
*/
@Host("{endpoint}")
@ServiceInterface(name = "SipRoutingAdminClien")
public interface SipRoutingAdminClientService {
@Get("/sip")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
Mono<Response<SipConfiguration>> getSipConfiguration(
@HostParam("endpoint") String endpoint,
@QueryParam("api-version") String apiVersion,
@HeaderParam("Accept") String accept,
Context context);
@Patch("/sip")
@ExpectedResponses({200})
@UnexpectedResponseExceptionType(
value = CommunicationErrorResponseException.class,
code = {500, 422, 415})
@UnexpectedResponseExceptionType(CommunicationErrorResponseException.class)
Mono<Response<SipConfiguration>> patchSipConfiguration(
@HostParam("endpoint") String endpoint,
@QueryParam("api-version") String apiVersion,
@BodyParam("application/merge-patch+json") SipConfiguration body,
@HeaderParam("Accept") String accept,
Context context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource along with {@link Response} on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync() {
final String accept = "application/json";
return FluxUtil.withContext(
context -> service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context));
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource along with {@link Response} on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> getSipConfigurationWithResponseAsync(Context context) {
final String accept = "application/json";
return service.getSipConfiguration(this.getEndpoint(), this.getApiVersion(), accept, context);
}
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> getSipConfigurationAsync() {
return getSipConfigurationWithResponseAsync().flatMap(res -> Mono.justOrEmpty(res.getValue()));
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Gets SIP configuration for resource.
*
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public SipConfiguration getSipConfiguration() {
return getSipConfigurationAsync().block();
}
/**
* Gets SIP configuration for resource.
*
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return sIP configuration for resource along with {@link Response}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<SipConfiguration> getSipConfigurationWithResponse(Context context) {
return getSipConfigurationWithResponseAsync(context).block();
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list along with {@link Response} on successful
* completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(SipConfiguration body) {
final String accept = "application/json";
return FluxUtil.withContext(
context ->
service.patchSipConfiguration(this.getEndpoint(), this.getApiVersion(), body, accept, context));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list along with {@link Response} on successful
* completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SipConfiguration>> patchSipConfigurationWithResponseAsync(
        SipConfiguration body, Context context) {
    // Invoke the generated service proxy directly with the caller-supplied Context.
    return service.patchSipConfiguration(
        this.getEndpoint(), this.getApiVersion(), body, "application/json", context);
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body) {
    // Unwrap the Response body; a null value completes the Mono empty instead of erroring.
    return patchSipConfigurationWithResponseAsync(body)
        .flatMap(response -> Mono.justOrEmpty(response.getValue()));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SipConfiguration> patchSipConfigurationAsync(SipConfiguration body, Context context) {
    // Context-aware variant; a null Response body completes the Mono empty instead of erroring.
    return patchSipConfigurationWithResponseAsync(body, context)
        .flatMap(response -> Mono.justOrEmpty(response.getValue()));
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public SipConfiguration patchSipConfiguration(SipConfiguration body) {
    // Synchronous convenience overload: block on the async pipeline.
    Mono<SipConfiguration> patched = patchSipConfigurationAsync(body);
    return patched.block();
}
/**
* Patches SIP configuration for resource.
*
* @param body Configuration patch.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server.
* @throws CommunicationErrorResponseException thrown if the request is rejected by server on status code 500, 422,
* 415.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return represents a SIP configuration. When a call is being routed the routes are applied in the same order as
* in the routes list. A route is matched by its number pattern. Call is then directed into route's first
* available trunk, based on the order in the route's trunks list along with {@link Response}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<SipConfiguration> patchSipConfigurationWithResponse(SipConfiguration body, Context context) {
    // Synchronous variant that surfaces the full HTTP response.
    Mono<Response<SipConfiguration>> asyncResponse = patchSipConfigurationWithResponseAsync(body, context);
    return asyncResponse.block();
}
} |
Just a nit: since these are implementations you don't need to remove them, but the methods are so simple they could just be inlined. | private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
} | } | private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} |
This is extracted into methods mainly for testing purposes. For now, I would keep it consistent with PhoneNumbersClientBuilder. | private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
} | } | private void validateRequiredFields() {
Objects.requireNonNull(endpoint);
} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} |
`FluxUtil.collectBytes*` offers an optimized way to collect the download stream into a byte array. | public static Mono<byte[]> downloadToByteArray(BlobContainerAsyncClient client, String blobPath) {
return client.getBlobAsyncClient(blobPath)
.downloadWithResponse(null, null, null, false)
.flatMap(response -> FluxUtil.collectBytesFromNetworkResponse(response.getValue(), response.getHeaders()));
} | .flatMap(response -> FluxUtil.collectBytesFromNetworkResponse(response.getValue(), response.getHeaders())); | public static Mono<byte[]> downloadToByteArray(BlobContainerAsyncClient client, String blobPath) {
return FluxUtil.collectBytesInByteBufferStream(client.getBlobAsyncClient(blobPath).download());
} | class DownloadUtils {
private static final ClientLogger LOGGER = new ClientLogger(DownloadUtils.class);
private static final ObjectMapper MAPPER = new ObjectMapper();
/**
 * Parses a raw JSON byte array into a {@link JsonNode} tree.
 *
 * <p>(The previous javadoc described reducing a Flux of ByteBuffer into a Mono of String,
 * which does not match this method's signature.)</p>
 *
 * @param json the raw JSON payload to parse.
 * @return a {@link Mono} emitting the parsed tree, or an error {@link Mono} carrying the
 * {@link IOException} wrapped in an {@link UncheckedIOException} when parsing fails.
 */
public static Mono<JsonNode> parseJson(byte[] json) {
    try {
        return Mono.just(MAPPER.reader().readTree(json));
    } catch (IOException e) {
        // Surface parse failures through the logger-aware reactive error path.
        return FluxUtil.monoError(LOGGER, new UncheckedIOException(e));
    }
}
} | class DownloadUtils {
private static final ClientLogger LOGGER = new ClientLogger(DownloadUtils.class);
private static final ObjectMapper MAPPER = new ObjectMapper();
/**
 * Parses a raw JSON byte array into a {@link JsonNode} tree.
 *
 * <p>(The previous javadoc described reducing a Flux of ByteBuffer into a Mono of String,
 * which does not match this method's signature.)</p>
 *
 * @param json the raw JSON payload to parse.
 * @return a {@link Mono} emitting the parsed tree, or an error {@link Mono} carrying the
 * {@link IOException} wrapped in an {@link UncheckedIOException} when parsing fails.
 */
public static Mono<JsonNode> parseJson(byte[] json) {
    try {
        return Mono.just(MAPPER.reader().readTree(json));
    } catch (IOException e) {
        // Surface parse failures through the logger-aware reactive error path.
        return FluxUtil.monoError(LOGGER, new UncheckedIOException(e));
    }
}
} |
I feel we can skip initializing this here as it gets initialized in the client construction before init here: https://github.com/Azure/azure-sdk-for-java/blob/ff077373038ff09cf967ddab3d3cf4f0649ca60a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/AsyncDocumentClient.java#L230 -> https://github.com/Azure/azure-sdk-for-java/blob/ff077373038ff09cf967ddab3d3cf4f0649ca60a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentClientImpl.java#L373 So the fix just would be deleting the line that initializes queryPlanCache here ```suggestion ``` | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this, this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE));
this.retryPolicy.setRxCollectionCache(this.collectionCache);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)); | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this, this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private ApiType apiType;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
// Delegating constructor: forwards to the credential-based constructor (with no TokenCredential)
// and then attaches the caller-supplied authorization token resolver.
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
        credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Delegating constructor: same as above but also accepts an AAD TokenCredential before
// attaching the authorization token resolver.
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
        credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Constructor handling resource-token (permission feed) authentication. After the common
// initialization it builds resourceTokensMap: resource id/full-name -> (partition key, token)
// pairs, used for per-resource authorization.
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType) {
    this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
        credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);

    if (permissionFeed != null && permissionFeed.size() > 0) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));

            if (segments.length <= 0) {
                throw new IllegalArgumentException("resourceLink");
            }

            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            // Reject permissions whose resource link cannot be parsed into path segments.
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }

            // Group all tokens granted for the same resource under one map entry.
            partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
            if (partitionKeyAndResourceTokenPairs == null) {
                partitionKeyAndResourceTokenPairs = new ArrayList<>();
                this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
            }

            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }

        if(this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }

        // Keep the first resource token from the feed when it is a genuine resource token.
        String firstToken = permissionFeed.get(0).getToken();
        if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType) {
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.getAndDecrement();
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE));
this.apiType = apiType;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
@Override
public DiagnosticsClientConfig getConfig() {
    // Diagnostics configuration captured during construction.
    return diagnosticsClientConfig;
}

@Override
public CosmosDiagnostics createDiagnostics() {
    // Fresh diagnostics instance bound to this client and its global endpoint manager.
    return BridgeInternal.createCosmosDiagnostics(this, this.globalEndpointManager);
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Pushes the freshly initialized caches and configuration into the gateway store model.
// Improvement: cast gatewayProxy once instead of repeating the cast on every call; it is
// created via createRxGatewayProxy, which returns an RxGatewayStoreModel.
private void updateGatewayProxy() {
    RxGatewayStoreModel gatewayStoreModel = (RxGatewayStoreModel) this.gatewayProxy;
    gatewayStoreModel.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    gatewayStoreModel.setCollectionCache(this.collectionCache);
    gatewayStoreModel.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    gatewayStoreModel.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Serializes this client's collection cache into the given metadata-caches snapshot.
 *
 * @param state target snapshot that receives the serialized collection cache.
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
// Wires up direct-mode connectivity: a global address resolver plus the store client factory,
// then creates the server store model. Relies on collectionCache and partitionKeyRangeCache
// having been initialized first (see init()).
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        // NOTE(review): null argument here — presumably an optional hook; confirm against
        // the GlobalAddressResolver constructor signature.
        null,
        this.connectionPolicy,
        this.apiType);

    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled
    );

    this.createStoreModel(true);
}
// Adapts this client to DatabaseAccountManagerInternal (consumed by GlobalEndpointManager),
// delegating endpoint, database-account and connection-policy lookups back to the outer client.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {

        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }

        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }

        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
// Factory for the gateway (HTTP) store model; package-private so tests can override it.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
// Builds the gateway HTTP client from the connection policy. When connection sharing is
// enabled a process-wide shared client is reused; otherwise a dedicated client is created
// and its config recorded in this client's diagnostics.
private HttpClient httpClient() {
    HttpClientConfig config = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(config, diagnosticsClientConfig);
    }

    diagnosticsClientConfig.withGatewayHttpClientConfig(config);
    return HttpClient.createFixed(config);
}
// Creates the direct-mode store model backed by a StoreClient.
// NOTE(review): the subscribeRntbdStatus parameter is not used in this body — confirm whether
// it should be wired through to store client creation or removed from callers.
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations
    );

    this.storeModel = new ServerStoreModel(storeClient);
}
// Simple accessors over client configuration and endpoint state.

@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}

@Override
public URI getWriteEndpoint() {
    // First available write endpoint, or null when none has been resolved yet.
    return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}

@Override
public URI getReadEndpoint() {
    // First available read endpoint, or null when none has been resolved yet.
    return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}

@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}

@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}

@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}

@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    // Obtain a per-request retry policy and run the create through the retry-aware helper.
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the actual "create database" request; invoked via the retry-aware
// wrapper in createDatabase.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }

        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);

        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);

        // Time the payload serialization so it can be attached to request diagnostics below.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);

        // Let the retry policy inspect the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Route synchronous failures through the reactive error channel instead of throwing.
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // Obtain a per-request retry policy and run the delete through the retry-aware helper.
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Deletes a database addressed by its link via the common DELETE path.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);

        final String resourcePath = Utils.joinPath(databaseLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.Database, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    // Mint a per-operation retry policy; defer so retries re-run the read.
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestPolicy),
        requestPolicy);
}
// Reads a database addressed by its link via the common GET path.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);

        final String resourcePath = Utils.joinPath(databaseLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Database, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.read(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    // Databases are enumerated as a feed from the root databases path.
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link to the feed link a query should target.
// Root-scoped resource types map to fixed root paths; all other types are a
// child path segment appended to the parent link.
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:            return Paths.DATABASES_ROOT;
        case Offer:               return Paths.OFFERS_ROOT;
        case DocumentCollection:  return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case User:                return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        case Permission:          return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:          return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:     return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:             return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
// Extracts the operation context attached to query options; null options
// simply mean no context was attached.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
// Extracts the operation context attached to request options, if any.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Builds the query Flux for a given parent resource: resolves the feed link,
// allocates one activity id for the logical query, and wraps execution in an
// InvalidPartitionExceptionRetryPolicy (presumably to recover from a stale
// collection cache — TODO confirm against the policy implementation).
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    // Feed link the query is issued against.
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
        getOperationContextAndListenerTuple(options));

    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));

    // Defer the pipeline construction so retries rebuild it from scratch.
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId),
        invalidPartitionExceptionRetryPolicy);
}
// Creates the query execution context(s) and maps each emitted page.
// For pipelined queries the QueryInfo is attached to responses with a
// select-value projection, and the query-plan diagnostics are attached to
// the first response only.
private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal(
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId) {

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache);

    // Guards the one-time attachment of query-plan diagnostics.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        // QueryInfo is only available on the pipelined execution context.
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) {
            queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }

        // Effectively-final copy for use inside the lambda below.
        QueryInfo finalQueryInfo = queryInfo;
        return iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }

                    // Query-plan diagnostics are only added to the first page.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
    });
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    return queryDatabases(new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Database queries target the root databases feed.
    return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    // Per-operation retry policy; the work is deferred so retries re-enter
    // the internal implementation.
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestPolicy),
        requestPolicy);
}
// Creates a collection under the given database: validates input, serializes
// the collection (timed for diagnostics), issues the create, and records the
// returned session token so follow-up session reads see the new collection.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        // Rejects ids with illegal characters or a trailing space.
        validateResource(collection);

        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);

        // Time the payload serialization so it can be surfaced in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token for the newly created collection.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    // Per-operation retry policy; deferred so retries re-enter the internal
    // implementation.
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestPolicy),
        requestPolicy);
}
// Replaces a collection via its self-link: validates input, serializes the
// new definition (timed for diagnostics), issues the replace, and refreshes
// the session token when a resource body comes back.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);

        // Time the payload serialization so it can be surfaced in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Unlike create, the replace response body may be absent.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    // Per-operation retry policy; deferred so retries re-enter the internal
    // implementation.
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, requestPolicy),
        requestPolicy);
}
// Deletes a collection addressed by its link via the common DELETE path.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);

        final String resourcePath = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.DocumentCollection, resourcePath, headers, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Common DELETE dispatch: populate headers, note retry timing, then send.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // Stamp the retry end time once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Bulk delete of all items for a partition key is modelled as a POST.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            // Stamp the retry end time once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Common GET dispatch: populate headers, note retry timing, then send.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // Stamp the retry end time once at least one retry has happened.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Feed reads are plain GETs dispatched straight through the store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populatedRequest -> getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
// Queries are POSTed; the response's session token is captured so later
// session-consistent reads observe this query's progress.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populatedRequest ->
            this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest)
                .doOnNext(response -> this.captureSessionToken(populatedRequest, response)));
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    // Per-operation retry policy; deferred so retries re-run the read.
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, requestPolicy),
        requestPolicy);
}
// Reads a collection addressed by its link via the common GET path.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);

        final String resourcePath = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.DocumentCollection, resourcePath, headers, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.read(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    // The database link is mandatory; collections are read as a feed under it.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedPath);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               CosmosQueryRequestOptions options) {
    // Wrap the raw query text and reuse the shared query pipeline.
    return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Collection queries scope to the parent database link.
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Serializes stored-procedure parameters into a JSON array literal.
// JsonSerializable values use the model bridge; everything else goes through
// the shared Jackson mapper.
private static String serializeProcedureParams(List<Object> objectArray) {
    String[] serialized = new String[objectArray.size()];
    for (int index = 0; index < objectArray.size(); ++index) {
        Object param = objectArray.get(index);
        if (param instanceof JsonSerializable) {
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the per-request header map from client defaults plus the supplied
// RequestOptions. Order matters: option-level values overwrite client-level
// defaults (e.g. consistency level), and custom headers are applied first so
// well-known options can override them.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();

    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }

    // Client-level default consistency; may be overridden by options below.
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }

    if (options == null) {
        // No options: only the minimal-content preference can still apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }

    // Caller-supplied custom headers go in first so specific options below win.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }

    // Option-level content-response setting overrides the client default.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }

    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }

    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }

    if(options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }

    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }

    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }

    // Trigger include lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }

    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }

    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }

    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }

    // Explicit offer throughput wins over an offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }

    // ThroughputProperties are only honoured when no explicit throughput is set.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }

            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }

    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }

    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }

    if (options.getDedicatedGatewayRequestOptions() != null &&
        options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
        headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
            String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
    }

    return headers;
}
// Exposes the retry-policy factory so callers can mint per-operation policies.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection from the cache, then stamps the
// partition-key information onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(resolvedCollection -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
            return request;
        });
}
// Variant for when the collection lookup was already started by the caller:
// once it resolves, stamp the partition-key information and return the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(resolvedCollection -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
        return request;
    });
}
// Determines the effective partition key for the request and stamps it as
// both internal state and the x-ms-documentdb-partitionkey header.
// Precedence: explicit PartitionKey.NONE > explicit options key > empty key
// for non-partitioned collections > extraction from the document body.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Explicit NONE maps to the collection-defined "none" value.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection does not have partition key defined, so the request is
        // against a non-partitioned (fixed) collection.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // No key supplied: extract it from the document body itself.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // The buffer may have been consumed by serialization; rewind first.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }

        // Time the extraction so it can be surfaced in diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal =  extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }

    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Extracts the partition key value(s) from a document body according to the
// collection's partition key definition.
// HASH uses only the first defined path; a null or ObjectNode value at that
// path resolves to the "none" partition key. MULTI_HASH collects the value
// of every defined path. Returns null when the definition is null or when a
// HASH definition has an empty path list.
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    InternalObjectNode document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        switch (partitionKeyDefinition.getKind()) {
            case HASH:
                String path = partitionKeyDefinition.getPaths().iterator().next();
                List<String> parts = PathParser.getPathParts(path);
                if (parts.size() >= 1) {
                    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
                    if (value == null || value.getClass() == ObjectNode.class) {
                        // Missing or object-valued key collapses to "none".
                        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
                    }

                    if (value instanceof PartitionKeyInternal) {
                        return (PartitionKeyInternal) value;
                    } else {
                        return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
                    }
                }
                // Empty path list falls through to the final "return null".
                break;

            case MULTI_HASH:
                Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
                for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
                    String partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
                    List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
                    partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
                }
                return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);

            default:
                throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind());
        }
    }
    return null;
}
// Builds the service request for a document write (create/upsert/replace per
// operationType): serializes the document (timed for diagnostics), resolves
// the target collection and stamps partition-key information on the request.
// NOTE: disableAutomaticIdGeneration is accepted but not consumed in this
// visible body — presumably handled by a caller or downstream; verify.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {

    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    // Time the payload serialization so it can be surfaced in diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Document, path, requestHeaders, options, content);

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Resolve the collection, then attach partition-key headers asynchronously.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the service request for a transactional batch: wraps the pre-built
// batch body (timed for diagnostics), resolves the target collection and
// attaches the batch-specific headers.
// NOTE: disableAutomaticIdGeneration is accepted but not consumed in this
// visible body — presumably handled elsewhere; verify.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {

    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // The batch body is already serialized; only the byte-wrapping is timed.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Once the collection resolves, stamp batch headers (PK or PK-range id).
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
    // Populates the common per-request headers (date, auth, content-type, accept)
    // and, when required, feed-range filtering headers. Header mutation happens
    // eagerly; only AAD authorization (and feed-range resolution) is deferred to
    // the returned Mono — callers MUST chain on the result rather than drop it.
    private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        // Key/resource-token/resolver based auth is computed synchronously here;
        // AAD token auth is handled later in populateAuthorizationHeader.
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
            String resourceName = request.getResourceAddress();
            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                // The token must be URL-encoded before being placed in the header.
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }
        if (this.apiType != null) {
            request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
        }
        // Default content type: JSON for POST/PUT, JSON-patch for PATCH — only
        // when the caller has not already set one.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }
        if (RequestVerb.PATCH.equals(httpMethod) &&
            !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
        }
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }
        MetadataDiagnosticsContext metadataDiagnosticsCtx =
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
        // Feed-range requests need collection/routing-map resolution before auth.
        if (this.requiresFeedRangeFiltering(request)) {
            return request.getFeedRange()
                .populateFeedRangeFilteringHeaders(
                    this.getPartitionKeyRangeCache(),
                    request,
                    this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
                .flatMap(this::populateAuthorizationHeader);
        }
        return this.populateAuthorizationHeader(request);
    }
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
if (request.getResourceType() != ResourceType.Document &&
request.getResourceType() != ResourceType.Conflict) {
return false;
}
switch (request.getOperationType()) {
case ReadFeed:
case Query:
case SqlQuery:
return request.getFeedRange() != null;
default:
return false;
}
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
if (request == null) {
throw new IllegalArgumentException("request");
}
if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
.map(authorization -> {
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
return request;
});
} else {
return Mono.just(request);
}
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
if (httpHeaders == null) {
throw new IllegalArgumentException("httpHeaders");
}
if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
.map(authorization -> {
httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
return httpHeaders;
});
}
return Mono.just(httpHeaders);
}
    // Exposes the auth mode this client was configured with (e.g. master key,
    // resource token, or AAD token).
    @Override
    public AuthorizationTokenType getAuthorizationTokenType() {
        return this.authorizationTokenType;
    }
    // Resolves the authorization token for a request, in strict precedence order:
    // 1) a caller-supplied token resolver, 2) a TokenCredential-backed signature,
    // 3) a single resource token supplied as the auth key, 4) the permission-feed
    // resource token map. Reordering these branches would change auth behavior.
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {
        if (this.cosmosAuthorizationTokenResolver != null) {
            // Properties map is wrapped unmodifiable so resolvers cannot mutate request state.
            return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
                properties != null ? Collections.unmodifiableMap(properties) : null);
        } else if (credential != null) {
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
                resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            // The auth key itself is a resource token; use it as-is.
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                // Account reads are authorized with the first token from the permission feed.
                return this.firstResourceTokenFromPermissionFeed;
            }
            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
CosmosResourceType cosmosResourceType =
ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
if (cosmosResourceType == null) {
return CosmosResourceType.SYSTEM;
}
return cosmosResourceType;
}
    // Records the session token from a response into the session container so
    // subsequent session-consistent reads can observe this write.
    void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
        this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
    }
    // Issues a POST (create) request: populates headers asynchronously, updates
    // retry timing on retried attempts, then dispatches through the store proxy.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeaders(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
                // On a retry, close out the previous attempt's timing window.
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }
    // Issues a POST with the upsert header set; on success captures the session
    // token from the response.
    // NOTE(review): create(...) does not capture the session token while upsert
    // does — confirm whether that asymmetry is intentional.
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeaders(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                Map<String, String> headers = requestPopulated.getHeaders();
                // headers will never be null, since it is initialized even when no
                // request options are specified
                assert (headers != null);
                headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                    .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                );
            });
    }
    // Issues a PUT (replace) request: populates headers asynchronously, updates
    // retry timing on retried attempts, then dispatches via the store proxy.
    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeaders(request, RequestVerb.PUT)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.PATCH);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(request).processMessage(request);
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy);
}
    // Core create flow: builds the Create request (serializing the document and
    // resolving partition key info), sends it, and maps the raw response to a
    // typed ResourceResponse. Synchronous failures are surfaced as Mono.error.
    private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
                                                                    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
        try {
            logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
            Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create);
            Mono<RxDocumentServiceResponse> responseObservable =
                requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
            return responseObservable
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance);
}
    // Core upsert flow: builds an Upsert request, sends it via upsert(...) (which
    // sets the IS_UPSERT header and captures the session token), and maps the
    // raw response to a typed ResourceResponse.
    private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
                                                                    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
            Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Upsert);
            Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
            return responseObservable
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
Document typedDocument = documentFromObject(document, mapper);
return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (document == null) {
throw new IllegalArgumentException("document");
}
return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a database due to [{}]", e.getMessage());
return Mono.error(e);
}
}
    // Core replace flow: serializes the document (recording serialization
    // diagnostics), builds the Replace request, resolves the collection and
    // partition key, then sends the PUT and maps to a typed ResourceResponse.
    private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                     Document document,
                                                                     RequestOptions options,
                                                                     DocumentClientRetryPolicy retryPolicyInstance) {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
        // Time the serialization of the document for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = serializeJsonToByteBuffer(document);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
        // addPartitionKeyInformation emits the same request instance it was given,
        // so using the outer `request` here instead of `req` is equivalent.
        return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
    }
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
    // Core patch flow: serializes the patch operations (recording serialization
    // diagnostics), builds the Patch request, resolves partition key info from
    // the options, then sends the PATCH and maps to a typed ResourceResponse.
    private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
                                                                   CosmosPatchOperations cosmosPatchOperations,
                                                                   RequestOptions options,
                                                                   DocumentClientRetryPolicy retryPolicyInstance) {
        checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
        checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
        logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Patch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        // No document body is passed: for patch the partition key must come from
        // the request options.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request,
            null,
            null,
            options,
            collectionObs);
        // addPartitionKeyInformation emits the same request instance, so using the
        // outer `request` rather than `req` is equivalent.
        return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
    }
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy),
requestRetryPolicy);
}
    // Core delete flow: builds the Delete request, resolves the collection and
    // partition key (optionally from the supplied object snapshot), sends the
    // request, and maps to a typed ResourceResponse.
    private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
            return requestObs.flatMap(req -> this
                .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }
    // Deletes all documents in a logical partition of the collection.
    // NOTE(review): the partitionKey parameter is not forwarded to the internal
    // method — presumably the partition key is carried inside `options` and
    // resolved by addPartitionKeyInformation; confirm against the callers.
    @Override
    public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }
    // Core partition-delete flow: builds a Delete request against the
    // PartitionKey resource type, resolves collection and partition key from the
    // options, then dispatches and maps to a typed ResourceResponse.
    private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }
            logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
            return requestObs.flatMap(req -> this
                .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance);
}
    // Core point-read flow: builds the Read request, resolves the collection and
    // partition key from the options, sends the request, and maps to a typed
    // ResourceResponse.
    private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Reading a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
            // addPartitionKeyInformation emits the same request instance, so reading
            // via the outer `request` rather than `req` is equivalent.
            return requestObs.flatMap(req -> {
                return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in reading a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
    // Point-read optimized multi-item read: groups the requested item identities
    // by the partition key range that owns each item's effective partition key,
    // builds one query per range, runs them, and aggregates pages into a single
    // FeedResponse (summing request charge and merging query metrics).
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<CosmosItemIdentity> itemIdentityList,
        String collectionLink,
        CosmosQueryRequestOptions options,
        Class<T> klass) {
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );
        // Resolve the collection first: its PK definition and resource id are
        // needed for routing the per-item lookups.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request);
        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    throw new IllegalStateException("Collection cannot be null");
                }
                final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null);
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
                        new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    // Bucket each identity under the range owning its effective PK.
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                    rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                        collection.getPartitionKey());
                    // Run one query per range, then fold all pages into a single
                    // synthetic feed response.
                    return createReadManyQuery(
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        options,
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap))
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponse(finalList, headers);
                            return frp;
                        });
                });
            }
        );
    }
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
String partitionKeySelector = createPkSelector(partitionKeyDefinition);
for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {
SqlQuerySpec sqlQuerySpec;
if (partitionKeySelector.equals("[\"id\"]")) {
sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector);
} else {
sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector);
}
rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
}
return rangeQueryMap;
}
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
List<CosmosItemIdentity> idPartitionKeyPairList,
String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
String idValue = itemIdentity.getId();
String idParamName = "@param" + i;
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
if (!Objects.equals(idValue, pkValue)) {
continue;
}
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append(idParamName);
if (i < idPartitionKeyPairList.size() - 1) {
queryStringBuilder.append(", ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkParamName = "@param" + (2 * i);
parameters.add(new SqlParameter(pkParamName, pkValue));
String idValue = itemIdentity.getId();
String idParamName = "@param" + (2 * i + 1);
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
    // Converts a partition key definition's paths (e.g. "/pk") into the bracket
    // selector form used in generated SQL (e.g. ["pk"], concatenated for
    // hierarchical keys).
    // NOTE(review): the replace step substitutes a lone backslash for an embedded
    // double quote; if the intent is quote-escaping one would expect "\\\"" —
    // confirm against the query grammar before changing, since this alters the
    // generated query text.
    private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            .map(pathPart -> StringUtils.substring(pathPart, 1))
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.joining());
    }
/**
 * Executes a previously-built read-many query using the supplied per-range
 * query map and streams back the resulting feed pages.
 *
 * @param parentResourceLink link of the parent resource the query targets
 * @param sqlQuery           the overall query spec
 * @param options            query request options (may carry an operation listener)
 * @param klass              resource type to deserialize into
 * @param resourceTypeEnum   resource type of the query
 * @param collection         resolved target collection
 * @param rangeQueryMap      per partition-key-range query specs
 * @return a flux of feed responses produced by the execution contexts
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    UUID correlationActivityId = Utils.randomUUID();
    IDocumentQueryClient client =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> contexts =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            this,
            client,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            correlationActivityId,
            klass,
            resourceTypeEnum);
    return contexts.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Queries documents using a raw query string; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
// Adapts this client into the IDocumentQueryClient interface consumed by the query
// pipeline. Caches, retry policy and consistency levels delegate to the outer
// client; when an operation context/listener tuple is supplied, request/response/
// error callbacks are wired around query execution.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency as reported by the gateway.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Client-configured consistency override (may be null).
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
// Notify the listener before and after the wire call; the correlated
// activity id header ties the request to the operation context.
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// Returns null — appears unused by this client's query pipeline; confirm
// before relying on this method.
return null;
}
};
}
/**
 * Queries documents with a parameterized query spec; the query text is logged
 * before dispatch.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    // Log first so even queries that fail to start are recorded.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Starts a change-feed query over the given collection's documents.
 *
 * @throws NullPointerException if {@code collection} is null
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<Document>(
        this,
        ResourceType.Document,
        Document.class,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
// Reads every document of a single logical partition by (1) resolving the target
// collection, (2) building a partition-scoped scan query, (3) resolving the
// physical partition-key range owning the effective partition key, and
// (4) running the query pinned to that range. Wrapped in an
// InvalidPartitionExceptionRetryPolicy so stale routing info is refreshed.
@Override
public Flux<FeedResponse<Document>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Dummy request used only to resolve the collection through the cache.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query constrained to the supplied logical partition key.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
// Copy the options so the caller's instance is not mutated below.
final CosmosQueryRequestOptions effectiveOptions =
ModelBridgeInternal.createQueryRequestOptions(options);
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Map the logical partition key onto the physical range that owns it.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
resourceLink,
querySpec,
// Pin the query to the resolved physical range.
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
Document.class,
ResourceType.Document,
queryClient,
activityId);
});
},
invalidPartitionExceptionRetryPolicy);
});
}
// Exposes the shared query-plan cache (query text -> partitioned execution info).
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
/**
 * Reads the partition-key-range feed of a collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates inputs and builds the service request used to create or upsert a
 * stored procedure under the given collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty or
 *                                  {@code storedProcedure} is null
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Validates inputs and builds the service request used to create or upsert a
 * user-defined function under the given collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty or
 *                                  {@code udf} is null
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);

    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Create request for a stored procedure and maps the raw service
 * response to a typed resource response; synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Upsert request for a stored procedure and maps the raw service
 * response to a typed resource response; synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing stored procedure identified by its self link.
 *
 * @throws IllegalArgumentException (as an error Mono) if {@code storedProcedure} is null
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);

        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the stored procedure addressed by {@code storedProcedureLink}.
 *
 * @throws IllegalArgumentException (as an error Mono) if the link is empty
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the stored procedure addressed by {@code storedProcedureLink}.
 *
 * @throws IllegalArgumentException (as an error Mono) if the link is empty
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored-procedure feed of a collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures using a raw query string; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, options);
}
/**
 * Queries stored procedures with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    return createQuery(
        collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with default request options.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    List<Object> procedureParams) {
    // Delegate with null options; the overload supplies defaults.
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/**
 * Executes a stored procedure with explicit request options, retrying per the
 * session-token-reset retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request, retrying per the session-token-reset
 * retry policy.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Executes a stored procedure via an ExecuteJavaScript request: serializes the
 * procedure parameters into the request body, attaches partition-key
 * information, then maps the service response to a StoredProcedureResponse
 * (capturing the session token along the way). Synchronous failures become an
 * error Mono.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            // An empty body is sent when there are no parameters.
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // Fix: use the request emitted by addPartitionKeyInformation instead of
        // silently ignoring the lambda parameter and closing over the outer
        // `request`. Today both refer to the same mutated instance, but relying
        // on that implementation detail is fragile.
        return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Sends a transactional-batch request and parses the service response into a
 * CosmosBatchResponse; synchronous failures become an error Mono.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        return getBatchDocumentRequest(
                requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration)
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Create request for a trigger and maps the raw service response to
 * a typed resource response; synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a trigger, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Upsert request for a trigger and maps the raw service response to
 * a typed resource response; synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds the service request used to create or upsert a
 * trigger under the given collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty or
 *                                  {@code trigger} is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
/**
 * Replaces a trigger, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing trigger identified by its self link.
 *
 * @throws IllegalArgumentException (as an error Mono) if {@code trigger} is null
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the trigger addressed by {@code triggerLink}.
 *
 * @throws IllegalArgumentException (as an error Mono) if the link is empty
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);

        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the trigger addressed by {@code triggerLink}.
 *
 * @throws IllegalArgumentException (as an error Mono) if the link is empty
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);

        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger feed of a collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers using a raw query string; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, options);
}
/**
 * Queries triggers with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    CosmosQueryRequestOptions options) {
    return createQuery(
        collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function, retrying per the session-token-reset retry
 * policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Create request for a user-defined function and maps the raw
 * service response to a typed resource response; synchronous failures become
 * an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user-defined function, retrying per the session-token-reset retry
 * policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Upsert request for a user-defined function and maps the raw
 * service response to a typed resource response; synchronous failures become
 * an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a user-defined function, retrying per the session-token-reset retry
 * policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing user-defined function identified by its self link.
 *
 * @throws IllegalArgumentException (as an error Mono) if {@code udf} is null
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);

        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user-defined function, retrying per the session-token-reset retry
 * policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, CosmosQueryRequestOptions options) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
CosmosQueryRequestOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
String nameBasedLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query,
CosmosQueryRequestOptions options) {
return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy());
}
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return readFeed(options, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
CosmosQueryRequestOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
CosmosQueryRequestOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
    /**
     * Generic paginated ReadFeed over {@code resourceLink}, used by all readXxx feed methods.
     * Each page is fetched as a separate service request driven by {@link Paginator};
     * continuation tokens are threaded through the request headers.
     */
    private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
        if (options == null) {
            options = new CosmosQueryRequestOptions();
        }
        // -1 means "no explicit page size"; the service applies its default.
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int maxPageSize = maxItemCount != null ? maxItemCount : -1;
        // Effectively-final copy so the lambdas below can capture the options.
        final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
        // One retry policy instance shared by every page request of this feed.
        DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        // Builds the per-page request; (continuationToken, pageSize) come from the paginator.
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
            // Let the retry policy observe each page request before it is sent.
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };
        // Executes a page request under the shared retry policy and maps it to a typed page.
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
            .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
                retryPolicy);
        return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
    }
/**
 * Queries offers using a raw SQL query string.
 *
 * @param query the SQL query text
 * @param options query request options; may be {@code null}
 * @return a Flux emitting pages of matching {@link Offer} resources
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, options);
}

/**
 * Queries offers using a parameterized query spec.
 *
 * @param querySpec the parameterized SQL query
 * @param options query request options; may be {@code null}
 * @return a Flux emitting pages of matching {@link Offer} resources
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Offers live at the account root, so there is no parent resource link.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Fetches the database account from the service, retrying according to the
 * session-token-reset retry policy.
 *
 * @return a Mono emitting the {@link DatabaseAccount}
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicyInstance),
        retryPolicyInstance);
}

/**
 * Returns the most recently cached database account; performs no network call.
 *
 * @return the latest {@link DatabaseAccount} known to the global endpoint manager
 */
@Override
public DatabaseAccount getLatestDatabaseAccount() {
    return globalEndpointManager.getLatestDatabaseAccount();
}
/**
 * Issues a single "read database account" request against the service root ("").
 * Synchronous failures while building the request surface as an error Mono.
 *
 * @param documentClientRetryPolicy retry policy applied to the request
 * @return a Mono emitting the {@link DatabaseAccount}
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null, null);
        return read(serviceRequest, documentClientRetryPolicy)
            .map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Exposes the session container (typed as Object per the client contract).
 *
 * @return the current {@link SessionContainer}
 */
public Object getSession() {
    return sessionContainer;
}

/**
 * Replaces the session container; the argument must be a {@link SessionContainer}.
 *
 * @param sessionContainer the new session container instance
 */
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
/**
 * @return the client-side collection metadata cache
 */
@Override
public RxClientCollectionCache getCollectionCache() {
    return collectionCache;
}

/**
 * @return the partition key range metadata cache
 */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return this.partitionKeyRangeCache;
}
// Reads the database account document from one specific endpoint (used by the global
// endpoint manager when probing regional endpoints). Wrapped in Flux.defer so each
// subscription builds a fresh request.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
// Headers (auth etc.) must be populated before the endpoint override is applied.
return this.populateHeaders(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is only honored when both the client policy and the account enable it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Routes a request either to the gateway or to the direct (TCP) store model.
 * Certain requests must be routed through gateway even when the client connectivity mode is direct:
 * metadata resources (offers, client encryption keys, partition key ranges, most scripts),
 * topology-changing writes (create/upsert/delete/replace of databases, collections, users,
 * permissions), and cross-partition queries without a resolved partition target.
 *
 * @param request the service request to route
 * @return RxStoreModel the gateway proxy or the direct store model for this request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Metadata-style resources are always served by the gateway.
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
// Creating account-level/topology resources goes through gateway; data writes go direct.
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries/feeds over collection children without a resolved partition
// (no PK range identity and no partition key header) need gateway fan-out.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
// Idempotent shutdown: the closed flag guarantees resources are released exactly once;
// subsequent calls only log a warning. Release order: endpoint manager, store client
// factory, HTTP client, CPU monitor registration, then (if enabled) throughput control.
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
if (!closed.getAndSet(true)) {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
// Only initialized lazily by enableThroughputControlGroup, hence the guard.
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
/**
 * @return the JSON item deserializer used to materialize documents
 */
@Override
public ItemDeserializer getItemDeserializer() {
    return itemDeserializer;
}
// Registers a throughput control group, lazily creating the shared ThroughputControlStore
// on the first call. The compareAndSet ensures the store is built and wired into the store
// model exactly once; the method is synchronized so registration never races initialization.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
this.storeModel.enableThroughputControl(throughputControlStore);
}
this.throughputControlStore.enableThroughputControlGroup(group);
}
/**
 * Builds a parameterized SQL spec that scans one logical partition:
 * {@code SELECT * FROM c WHERE c<pkSelector> = @pkValue}.
 *
 * @param partitionKey the logical partition key value to match
 * @param partitionKeySelector serialized partition-key path selector appended to the alias
 * @return the query spec with the partition key bound as {@code @pkValue}
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    // Note: selector already carries its own leading bracket/slash syntax.
    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the physical feed ranges (one per partition key range) for a container.
// Wrapped in an InvalidPartitionException retry policy so a stale name cache triggers
// re-resolution rather than a hard failure.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
// Query-typed request used purely to drive collection resolution; it is never sent.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection, then asks the partition key range cache for all ranges
// overlapping the full EPK span and converts them to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
// forceRefresh=true so the returned ranges reflect the latest partition layout.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts resolved partition key ranges into EPK-based feed ranges.
 * A null range list indicates a stale name cache: the request is flagged for a
 * name-cache refresh and InvalidPartitionException is thrown so the retry policy
 * can re-resolve the collection.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}

/** Wraps a partition key range's EPK span as a {@link FeedRange}. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// --- Static client bookkeeping ---
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Full EPK span: min inclusive, max exclusive — covers every partition key range.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// --- Connection/auth configuration (mostly fixed at construction) ---
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// --- Session / metadata caches and store models (wired during init) ---
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource tokens keyed by resource id/full name (populated from a permission feed).
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private ApiType apiType;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
// Throughput control is opt-in; the store is created lazily on first enablement.
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
/**
 * Convenience constructor without a TokenCredential: delegates to the private base
 * constructor (passing null for tokenCredential) and then installs the custom
 * authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Constructor accepting both an AzureKeyCredential and a TokenCredential; delegates to
 * the private base constructor and then installs the custom authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs,
credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Base constructor variant that additionally consumes a permission feed: it builds the
 * resource-token map (resource id/full name -> list of (partition key, token) pairs)
 * used for resource-token authorization, and remembers the first resource token.
 *
 * @throws IllegalArgumentException if a permission's resource link cannot be parsed
 *         or the feed yields an empty token map
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs,
credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
// Split purely to validate the link is non-empty; parsing happens below.
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group tokens by the resource they grant access to.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Keep the first genuine resource token around as the default fallback token.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
/**
 * Base constructor: resolves the authorization mechanism (key credential, resource token,
 * master key, or AAD token credential), applies the connection policy, builds the session
 * container, HTTP client, global endpoint manager and retry policy, and registers with
 * the CPU monitor. On any runtime failure it closes itself before rethrowing, so no
 * partially-initialized client leaks resources.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType) {
activeClientsCnt.incrementAndGet();
// NOTE(review): ids descend (0, -1, -2, ...) while the active-client counter above
// ascends; upstream SDK uses incrementAndGet() here — confirm this is intended.
this.clientId = clientIdGenerator.getAndDecrement();
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Auth resolution precedence: explicit key credential > resource token >
// master key string > AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
// NOTE(review): this literal appears truncated by extraction (likely
// "<scheme>://<host>/.default" AAD scope) — restore from upstream source.
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
// Session capture is active for SESSION consistency or when explicitly overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
// LRU-bounded, synchronized cache of partitioned query plans.
this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE));
// NOTE(review): metadataCachesSnapshot is accepted but not used in this body — confirm
// whether warm-up from the snapshot was dropped intentionally.
this.apiType = apiType;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * @return the diagnostics configuration captured for this client
 */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}

/**
 * Creates a fresh diagnostics object bound to this client and its endpoint manager.
 *
 * @return a new {@link CosmosDiagnostics} instance
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    return BridgeInternal.createCosmosDiagnostics(this, this.globalEndpointManager);
}
// Builds the gateway configuration reader and verifies that a database account has
// already been fetched; derives the effective multi-write flag from policy + account.
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
// NOTE(review): both message literals below appear truncated at "https:" by extraction
// (the original troubleshooting URL was cut off) — restore from upstream source.
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the freshly initialized caches and configuration into the gateway store model.
 * Called after metadata caches are built, since the proxy is constructed before them.
 */
private void updateGatewayProxy() {
    RxGatewayStoreModel gatewayStoreModel = (RxGatewayStoreModel) this.gatewayProxy;
    gatewayStoreModel.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    gatewayStoreModel.setCollectionCache(this.collectionCache);
    gatewayStoreModel.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    gatewayStoreModel.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Serializes this client's collection metadata cache into the given snapshot,
 * allowing another client to warm up from it.
 *
 * @param state the snapshot to populate
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, collectionCache);
}
// Wires up direct (TCP) connectivity: the global address resolver, the store client
// factory, and finally the server store model. Only invoked for direct connection mode.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry
);
this.createStoreModel(true);
}
// Adapter exposing just the endpoint/account/policy surface of this client to the
// GlobalEndpointManager, which must not depend on the full client interface.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; package-private so tests can override it.
 *
 * @return a new {@link RxGatewayStoreModel} bound to this client
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
    RxGatewayStoreModel gatewayStoreModel = new RxGatewayStoreModel(
        this, sessionContainer, consistencyLevel, queryCompatibilityMode,
        userAgentContainer, globalEndpointManager, httpClient, apiType);
    return gatewayStoreModel;
}
/**
 * Builds the reactor HTTP client used for gateway traffic from the connection policy.
 * When connection sharing is enabled, a process-wide shared instance is reused;
 * otherwise a dedicated client is created and recorded in the diagnostics config.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (!connectionSharingAcrossClientsEnabled) {
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig);
        return HttpClient.createFixed(httpClientConfig);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
}
// Creates the direct-mode (server) store model from a new store client.
// NOTE(review): the subscribeRntbdStatus parameter is not used in this body — confirm
// whether RNTBD status subscription was dropped intentionally.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations
);
this.storeModel = new ServerStoreModel(storeClient);
}
/** @return the account's service endpoint this client was created with */
@Override
public URI getServiceEndpoint() {
    return serviceEndpoint;
}

/** @return the first available write endpoint, or {@code null} if none is known */
@Override
public URI getWriteEndpoint() {
    List<URI> writeEndpoints = globalEndpointManager.getWriteEndpoints();
    return writeEndpoints.isEmpty() ? null : writeEndpoints.get(0);
}

/** @return the first available read endpoint, or {@code null} if none is known */
@Override
public URI getReadEndpoint() {
    List<URI> readEndpoints = globalEndpointManager.getReadEndpoints();
    return readEndpoints.isEmpty() ? null : readEndpoints.get(0);
}

/** @return the effective connection policy */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return connectionPolicy;
}

/** @return whether write responses carry the resource payload back to the caller */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}

/** @return the account-level consistency level configured for this client */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}

/** @return the client telemetry collector (may be {@code null} until initialized) */
@Override
public ClientTelemetry getClientTelemetry() {
    return clientTelemetry;
}
/**
 * Creates a database, retrying per the session-token-reset retry policy.
 *
 * @param database the database definition to create
 * @param options request options; may be {@code null}
 * @return a Mono emitting the service response for the created database
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestPolicy),
        requestPolicy);
}
// Builds and sends a single "create database" request. Serialization time is measured
// and attached to the request's diagnostics; synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the JSON serialization so it shows up in the request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// The diagnostics context only exists once the request is created, so attach late.
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database by link, retrying per the session-token-reset retry policy.
 *
 * @param databaseLink link of the database to delete
 * @param options request options; may be {@code null}
 * @return a Mono emitting the service response for the deleted database
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestPolicy),
        requestPolicy);
}
// Builds and sends a single "delete database" request; synchronous failures
// (e.g. empty link) surface as Mono.error rather than a thrown exception.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database by link, retrying per the session-token-reset retry policy.
 *
 * @param databaseLink link of the database to read
 * @param options request options; may be {@code null}
 * @return a Mono emitting the service response for the database
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestPolicy),
        requestPolicy);
}
// Builds and sends a single "read database" request; synchronous failures
// (e.g. empty link) surface as Mono.error rather than a thrown exception.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all databases of the account as a paged feed.
 *
 * @param options query request options; may be {@code null}
 * @return a Flux emitting pages of {@link Database} resources
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) {
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a child resource type to the feed/query link for
 * that child collection (e.g. database link + DocumentCollection -> ".../colls").
 * Databases and offers are account-rooted and ignore the parent link.
 *
 * @throws IllegalArgumentException for resource types with no feed link
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childSegment);
}
// Extracts the operation context/listener pair from query options; null options carry none.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
              .getCosmosQueryRequestOptionsAccessor()
              .getOperationContext(options);
}
// Extracts the operation context/listener pair from request options; null options carry none.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Sets up a query against the feed identified by the parent link and resource type,
 * wrapping execution in a retry policy for invalid-partition errors.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    // Translate the parent link into the feed link the query executes against.
    final String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    final UUID activityId = Utils.randomUUID();
    final IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

    final InvalidPartitionExceptionRetryPolicy invalidPartitionRetryPolicy =
        new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options));

    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId),
        invalidPartitionRetryPolicy);
}
// Builds the query execution pipeline for a feed link and streams its pages.
// QueryInfo (when available) is attached to responses with SELECT VALUE
// projections, and query-plan diagnostics are attached to the first page only.
private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal(
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId) {
// Factory selects the concrete execution-context implementation; the last two
// arguments enable the query-plan cache when configured.
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache);
// Guard so query-plan diagnostics are recorded on exactly one response.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
// Only the pipelined execution context exposes QueryInfo.
if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) {
queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
return iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// compareAndSet flips the flag atomically, so the plan diagnostics
// context is added to the first emitted page only.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
});
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryDatabases(querySpec, options);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Databases are queried against the service root.
    return this.createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // One retry-policy instance per call, shared by the retry wrapper and the
    // internal implementation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
// Validates inputs, serializes the collection body, issues the Create request
// (capturing serialization timing diagnostics) and, on success, records the
// session token returned by the service for the new collection.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
// Rejects ids with illegal characters or a trailing space.
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Time the JSON serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// On success, remember the session token for the created collection.
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
// Synchronous failures (validation, serialization) surface as an error Mono.
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // One retry-policy instance per call, shared by the retry wrapper and the
    // internal implementation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
// Validates and serializes the collection, issues the Replace request against
// its self link (capturing serialization diagnostics) and refreshes the cached
// session token if the service returned a resource body.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
// Rejects ids with illegal characters or a trailing space.
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Time the JSON serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Unlike create, the response resource may be null here, so guard before
// updating the session container.
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
// Synchronous failures (validation, serialization) surface as an error Mono.
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // One retry-policy instance per call, shared by the retry wrapper and the
    // internal implementation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Delete request for a collection link.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);

        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error Mono.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Populates DELETE headers, updates retry-timing diagnostics when this is a
// retry attempt, then dispatches to the store proxy.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            boolean isRetry = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetry) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated)
                .processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Populates POST headers for the delete-by-partition-key operation, updates
// retry-timing diagnostics when this is a retry attempt, then dispatches.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            boolean isRetry = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetry) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Populates GET headers, updates retry-timing diagnostics when this is a retry
// attempt, then dispatches to the store proxy.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            boolean isRetry = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetry) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
// Feed reads are plain GETs with no retry-context bookkeeping.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.GET)
        .flatMap(populated -> getStoreProxy(populated).processMessage(populated));
}
// Queries are POSTs; the session token from each response is captured before
// the response is handed back.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel storeProxy = this.getStoreProxy(populated);
            return storeProxy.processMessage(populated)
                .map(response -> {
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // One retry-policy instance per call, shared by the retry wrapper and the
    // internal implementation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Read request for a collection link.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);

        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous failures surface as an error Mono.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // The collections feed hangs off the database link.
    String feedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedLink);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    CosmosQueryRequestOptions options) {
    // Wrap the raw query text and delegate to the shared query builder.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Collections are queried relative to their parent database link.
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes each stored-procedure parameter to JSON and emits them as a
 * JSON array literal, e.g. {@code [1,"a",{"k":1}]}.
 *
 * @throws IllegalArgumentException if a parameter cannot be serialized.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final int count = objectArray.size();
    final String[] jsonParams = new String[count];
    for (int i = 0; i < count; i++) {
        Object param = objectArray.get(i);
        if (param instanceof JsonSerializable) {
            // SDK model types carry their own JSON serialization.
            jsonParams[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                jsonParams[i] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return "[" + String.join(",", jsonParams) + "]";
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the per-request HTTP headers from client-level defaults plus the
// supplied RequestOptions. Option-derived headers are applied after the
// client-level ones, so options override client defaults where both apply.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
// No options: only apply the client-level "return minimal" preference for
// document writes when content-on-write is disabled.
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
// Caller-provided custom headers are copied first; specific option headers
// below can overwrite them.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request content-on-write setting overrides the client default.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if(options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
// Request-level consistency overrides the client-level header set above.
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Explicit offer throughput wins over an offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties are only consulted when no explicit offer throughput
// was provided; manual (fixed) and autoscale settings are mutually exclusive.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
// Integrated-cache staleness is forwarded in milliseconds.
if (options.getDedicatedGatewayRequestOptions() != null &&
options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
return headers;
}
// Exposes the client's session-token-resetting retry-policy factory.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection (for its partition-key definition) and then
// stamps the partition-key header onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
            return request;
        });
}
// Stamps the partition-key header onto the request once the caller-supplied
// collection lookup completes.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
        return request;
    });
}
// Determines the effective partition key for a request and sets both the
// internal PK on the request and the x-ms-documentdb-partitionkey header.
// Resolution order: explicit PK in options (NONE or a value), then an empty PK
// for non-partitioned collections, then extraction from the document body;
// otherwise the caller must supply one and this throws.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
// PartitionKey.NONE maps to the collection-specific "none" value.
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection has no partition-key definition (migrated/non-partitioned).
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
// Extract the key from the document body, normalizing the three possible
// input forms into an InternalObjectNode first.
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// Rewind: the buffer may already have been read for serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Time the extraction so it can be surfaced in request diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
// Header value is JSON; escape non-ASCII so it is a valid HTTP header value.
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Extracts the partition-key value from a document body according to the
// collection's partition-key definition. Returns null when the definition is
// null. HASH uses the single defined path; MULTI_HASH collects one value per
// defined path (hierarchical partition keys).
public static PartitionKeyInternal extractPartitionKeyValueFromDocument(
InternalObjectNode document,
PartitionKeyDefinition partitionKeyDefinition) {
if (partitionKeyDefinition != null) {
switch (partitionKeyDefinition.getKind()) {
case HASH:
String path = partitionKeyDefinition.getPaths().iterator().next();
List<String> parts = PathParser.getPathParts(path);
if (parts.size() >= 1) {
Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
// A missing value, or an object at the PK path, is treated as the
// collection's "none" partition key.
if (value == null || value.getClass() == ObjectNode.class) {
value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
}
if (value instanceof PartitionKeyInternal) {
return (PartitionKeyInternal) value;
} else {
return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
}
break;
case MULTI_HASH:
Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()];
for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){
String partitionPath = partitionKeyDefinition.getPaths().get(pathIter);
List<String> partitionPathParts = PathParser.getPathParts(partitionPath);
partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts);
}
return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false);
default:
throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind());
}
}
// No definition, or a HASH definition with an empty path list.
return null;
}
// Builds the service request for a document write (Create/Upsert/etc.):
// serializes the document (capturing timing diagnostics), constructs the
// request, resolves the collection and stamps the partition-key header.
// NOTE(review): disableAutomaticIdGeneration is currently unused here —
// presumably id generation happens in a caller; confirm before relying on it.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Time the JSON serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
operationType, ResourceType.Document, path, requestHeaders, options, content);
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Collection resolution supplies the partition-key definition used to stamp
// the partition-key header on the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the service request for a transactional batch: wraps the pre-built
// batch body, constructs the Batch request (capturing serialization timing
// diagnostics), resolves the collection and applies the batch routing headers.
// NOTE(review): disableAutomaticIdGeneration is unused here — confirm intent.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
// The batch body was already serialized by the caller; only the UTF-8
// wrapping is timed here.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Batch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// The resolved collection supplies the partition-key definition needed for
// the batch routing headers.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
return request;
});
}
// Applies batch routing (by partition-key value or by partition-key-range id)
// plus the common batch headers, then returns the same request instance.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal = partitionKey.equals(PartitionKey.NONE)
            ? ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey())
            : BridgeInternal.getPartitionKeyInternal(partitionKey);
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String rangeId = ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(rangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
// Populates the transport-level headers for an outgoing request: date,
// key/resource-token authorization (when configured), api type, content
// type/accept, optional feed-range filtering headers, and finally the
// AAD authorization header when that auth mode is active.
private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key/token-based auth is computed synchronously here; AAD-token auth is
// applied asynchronously in populateAuthorizationHeader below.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The signature must be URL-encoded before being used as a header value.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
// Default content-type for bodied verbs; PATCH uses the JSON-patch media type.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range-scoped reads need extra routing headers derived from the
// resolved collection before authorization is finalized.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
/**
 * Decides whether the request must carry feed-range filtering headers:
 * only document/conflict feed reads and queries that specify a feed range do.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeCapableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeCapableResource) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean feedOrQueryOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return feedOrQueryOperation && request.getFeedRange() != null;
}
/**
 * Adds the AAD bearer token to the request's authorization header when the client
 * is configured for AAD auth; other auth schemes are handled synchronously
 * elsewhere, so the request passes through unchanged.
 *
 * @param request the request to decorate; must not be null
 * @return a Mono emitting the (possibly decorated) request
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * HttpHeaders variant of the AAD authorization decoration: sets the bearer token
 * on the headers object when AAD auth is configured, otherwise passes it through.
 *
 * @param httpHeaders the header set to decorate; must not be null
 * @return a Mono emitting the (possibly decorated) headers
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * Returns the authorization scheme (e.g. primary master key, resource token,
 * AAD token) this client instance was configured with.
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Resolves the authorization token for a request, trying credentials in priority
 * order: custom token resolver, key credential, single resource token, and
 * finally the per-resource token map built from a permission feed.
 *
 * @param resourceName the resource address being accessed
 * @param resourceType the type of the target resource
 * @param requestVerb  the HTTP verb, used for signing / resolver input
 * @param headers      request headers consumed by signature generation
 * @param tokenType    requested token type (not consulted by every branch)
 * @param properties   request properties surfaced read-only to a custom resolver
 * @return the token to place in the authorization header
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Custom resolver takes precedence; properties are exposed as unmodifiable.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        // Key credential: compute the HMAC signature over verb/resource/headers.
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is sent verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Database-account level calls use the first token seen in the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to its public CosmosResourceType counterpart,
 * falling back to SYSTEM when the service-serialized name has no public mapping.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
/**
 * Records the session token returned by the service so subsequent session-consistent
 * reads can include it.
 */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Issues a POST (create) after populating request headers, updating the retry
 * context's end time when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populated).processMessage(populated, operationContextAndListenerTuple);
        });
}
/**
 * Issues an upsert: a POST with the IS_UPSERT header set. Captures the returned
 * session token so session-consistent reads observe this write.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeaders(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            // The IS_UPSERT header is what distinguishes upsert from create on the wire.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated)
                .processMessage(populated, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
/**
 * Issues a PUT (replace) after populating request headers, updating the retry
 * context's end time when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PUT)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Issues a PATCH after populating request headers, updating the retry context's
 * end time when this is a retry attempt.
 *
 * BUGFIX: the Mono returned by populateHeaders was previously discarded, so its
 * lazy steps (AAD authorization header, feed-range filtering headers) never ran
 * before the request was dispatched. It is now chained into the pipeline exactly
 * like create/upsert/replace.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeaders(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Creates a document in the given collection. When no partition key is supplied,
 * the policy is wrapped so a partition-key mismatch (e.g. stale collection cache)
 * triggers a refresh and retry.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the create-document request and dispatches it through the retry-aware
 * pipeline, mapping the raw service response to a typed resource response.
 *
 * @return a Mono emitting the created document, or an error Mono if request
 *         construction fails synchronously (e.g. serialization)
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // Synchronous failures are converted into an error Mono so callers see one channel.
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a document into the given collection. Without an explicit partition key
 * the policy is wrapped so a partition-key mismatch triggers a collection-cache
 * refresh and retry.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the upsert request (a create request later marked IS_UPSERT) and
 * dispatches it through the retry-aware pipeline.
 *
 * @return a Mono emitting the upserted document, or an error Mono on synchronous failure
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Upsert);
        Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document identified by its link. Without an explicit partition key
 * the collection link is derived from the document link so a partition-key
 * mismatch can be retried against a refreshed collection cache.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates the arguments, converts the arbitrary payload into a typed Document
 * and delegates to the Document-typed replace overload.
 *
 * @throws IllegalArgumentException (as an error Mono) when documentLink is empty
 *         or document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the throwable as the final argument so the stack trace is logged,
        // consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its self link. Without an explicit partition key the
 * policy is wrapped so a partition-key mismatch triggers a collection-cache
 * refresh and retry scoped to the document's self link.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates the document and delegates to the link-based replace overload using
 * the document's self link.
 *
 * @throws IllegalArgumentException (as an error Mono) when document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // BUGFIX: the message previously said "replacing a database" in this
        // document-replace path; also pass the throwable for the stack trace.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core implementation of document replace: serializes the document (timed for
 * diagnostics), builds the PUT request, resolves partition-key information and
 * dispatches through the retry-aware pipeline.
 *
 * @param documentLink        link of the document being replaced
 * @param document            the replacement payload (non-null)
 * @param options             optional request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent
 * @return a Mono emitting the replaced document's resource response
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Serialization is timed so it shows up in the request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    // NOTE(review): the lambda forwards the outer 'request' rather than 'req'; they
    // appear to be the same instance mutated in place, but confirm before relying on it.
    return requestObs.flatMap(req -> replace(request, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Applies a set of patch operations to the document identified by the link,
 * running through the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of document PATCH: serializes the patch operations (timed
 * for diagnostics), builds the request, resolves partition-key information and
 * dispatches through the retry-aware pipeline.
 *
 * @param documentLink          link of the document to patch (must be non-empty)
 * @param cosmosPatchOperations the patch operations to apply (non-null)
 * @param options               optional request options (may be null)
 * @param retryPolicyInstance   retry policy notified before the request is sent
 * @return a Mono emitting the patched document's resource response
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink,
                                                               CosmosPatchOperations cosmosPatchOperations,
                                                               RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    // Serialization is timed so it shows up in the request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    // Patch has no item body to extract the PK from, hence the null content/object args.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    // NOTE(review): the lambda forwards the outer 'request' rather than 'req'; they
    // appear to be the same instance mutated in place, but confirm before relying on it.
    return requestObs.flatMap(req -> patch(request, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes the document identified by the link; delegates to the overload that
 * carries no pre-read item payload.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, null, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the document identified by the link, supplying the pre-read item so
 * partition-key information can be derived from it.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, internalObjectNode, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of document delete: builds the DELETE request, resolves
 * partition-key information (optionally from the pre-read item) and dispatches
 * through the retry-aware pipeline.
 *
 * @param documentLink       link of the document to delete (must be non-empty)
 * @param internalObjectNode optional pre-read item used for PK extraction (may be null)
 * @param options            optional request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent
 * @return a Mono emitting the delete response, or an error Mono on synchronous failure
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs);
        return requestObs.flatMap(req -> this
            .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes all documents that share a logical partition key value.
 *
 * NOTE(review): the partitionKey parameter is not forwarded to the internal
 * method — presumably the key travels inside {@code options}; verify against
 * callers before relying on this.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Core implementation of delete-by-partition-key: builds a DELETE request with
 * resource type PartitionKey, resolves partition-key information and dispatches
 * through the retry-aware pipeline.
 *
 * @param collectionLink      link of the collection whose partition is purged
 * @param options             optional request options (expected to carry the PK; may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent
 * @return a Mono emitting the delete response, or an error Mono on synchronous failure
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a single document by its link through the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of point-read: builds the READ request, resolves
 * partition-key information and dispatches through the retry-aware pipeline.
 *
 * @param documentLink        link of the document to read (must be non-empty)
 * @param options             optional request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent
 * @return a Mono emitting the read document, or an error Mono on synchronous failure
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        // NOTE(review): the lambda forwards the outer 'request' rather than 'req'; they
        // appear to be the same instance mutated in place, but confirm before relying on it.
        return requestObs.flatMap(req -> {
            return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads the full document feed of a collection, implemented as a
 * SELECT * query over the collection.
 */
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Delegate to the query path; a feed read is just an unfiltered query.
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs with few service
 * round trips: items are grouped by the partition key range that owns them, one
 * query per range is issued, and the resulting pages are merged into a single
 * synthetic feed response.
 *
 * @param itemIdentityList the (id, partition key) pairs to read
 * @param collectionLink   link of the containing collection
 * @param options          query options applied to the per-range queries
 * @param klass            the POJO type each result document is deserialized into
 * @return a Mono emitting one merged feed response with all found items
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    CosmosQueryRequestOptions options,
    Class<T> klass) {
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            // The routing map tells us which physical PK range owns each item.
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap =
                    new HashMap<>();
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                // Bucket every requested item under the PK range owning its effective PK.
                itemIdentityList
                    .forEach(itemIdentity -> {
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                BridgeInternal.getPartitionKeyInternal(
                                    itemIdentity.getPartitionKey()),
                                pkDefinition);
                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        if (partitionRangeItemKeyMap.get(range) == null) {
                            List<CosmosItemIdentity> list = new ArrayList<>();
                            list.add(itemIdentity);
                            partitionRangeItemKeyMap.put(range, list);
                        } else {
                            List<CosmosItemIdentity> pairs =
                                partitionRangeItemKeyMap.get(range);
                            pairs.add(itemIdentity);
                            partitionRangeItemKeyMap.put(range, pairs);
                        }
                    });
                // One SqlQuerySpec per range, matching that range's items.
                Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                    collection.getPartitionKey());
                return createReadManyQuery(
                    resourceLink,
                    new SqlQuerySpec(DUMMY_SQL_QUERY),
                    options,
                    Document.class,
                    ResourceType.Document,
                    collection,
                    Collections.unmodifiableMap(rangeQueryMap))
                    .collectList()
                    .map(feedList -> {
                        // Merge all pages: concatenate results and sum request charges.
                        List<T> finalList = new ArrayList<>();
                        HashMap<String, String> headers = new HashMap<>();
                        // NOTE(review): query metrics are aggregated here but never attached
                        // to the returned response; confirm whether that is intentional.
                        ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                        double requestCharge = 0;
                        for (FeedResponse<Document> page : feedList) {
                            ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                ModelBridgeInternal.queryMetrics(page);
                            if (pageQueryMetrics != null) {
                                pageQueryMetrics.forEach(
                                    aggregatedQueryMetrics::putIfAbsent);
                            }
                            requestCharge += page.getRequestCharge();
                            finalList.addAll(page.getResults().stream().map(document ->
                                ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                        }
                        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                            .toString(requestCharge));
                        FeedResponse<T> frp = BridgeInternal
                            .createFeedResponse(finalList, headers);
                        return frp;
                    });
            });
        }
        );
}
/**
 * Builds one SqlQuerySpec per partition key range, choosing the cheaper
 * id-only IN(...) form when the collection is partitioned on "id".
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final boolean partitionedById = pkSelector.equals("[\"id\"]");
    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        SqlQuerySpec spec = partitionedById
            ? createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), pkSelector)
            : createReadManyQuerySpec(entry.getValue(), pkSelector);
        queriesByRange.put(entry.getKey(), spec);
    }
    return queriesByRange;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for collections whose
 * partition key equals the id, including only the pairs where id and partition
 * key value actually match.
 *
 * BUGFIX: the previous index-based loop decided the "," separator against the
 * original list size, so items skipped by the id != pk check produced dangling
 * or duplicated commas (e.g. "IN ( @param0, )"). Filtering first and joining
 * afterwards makes the separator placement correct for any skip pattern.
 *
 * @param idPartitionKeyPairList candidate (id, partition key) pairs
 * @param partitionKeySelector   unused here; kept for signature parity with
 *                               createReadManyQuerySpec
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();
    int paramIndex = 0;
    for (CosmosItemIdentity itemIdentity : idPartitionKeyPairList) {
        String idValue = itemIdentity.getId();
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // Only pairs whose id equals the partition key value qualify for the IN form.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        String idParamName = "@param" + paramIndex++;
        parameters.add(new SqlParameter(idParamName, idValue));
        parameterNames.add(idParamName);
    }
    String queryText =
        "SELECT * FROM c WHERE c.id IN ( " + String.join(", ", parameterNames) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds a disjunction of (id, partition key) equality pairs:
 *   SELECT * FROM c WHERE ( (c.id = @p1 AND  c[pk] = @p0 ) OR ... )
 * Parameters are numbered pairwise: 2*i for the partition key, 2*i+1 for the id.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int index = 0; index < itemIdentities.size(); index++) {
        if (index > 0) {
            query.append(" OR ");
        }
        CosmosItemIdentity identity = itemIdentities.get(index);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        String pkParamName = "@param" + (2 * index);
        String idParamName = "@param" + (2 * index + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(c.id = ")
            .append(idParamName)
            .append(" AND  c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the bracketed property selector for the collection's partition key
 * definition, e.g. path "/pk" becomes ["pk"], used as c["pk"] in generated
 * queries. Multi-path (hierarchical) definitions concatenate their segments.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        // Drop the leading '/' of each path segment.
        .map(pathPart -> StringUtils.substring(pathPart, 1))
        // NOTE(review): this replaces an embedded '"' with a single backslash,
        // which does not look like valid quote escaping (expected '\"');
        // confirm the intended behavior before changing it.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Creates and runs the per-partition-range read-many execution contexts and
 * flattens their pages into a single Flux of feed responses.
 *
 * @param parentResourceLink query link of the containing collection
 * @param sqlQuery           placeholder query spec (per-range specs come from rangeQueryMap)
 * @param options            query options applied to each per-range query
 * @param klass              result document type
 * @param resourceTypeEnum   resource type being queried
 * @param collection         resolved collection metadata
 * @param rangeQueryMap      the per-range query specs to execute
 * @return a Flux of feed responses across all targeted ranges
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    // Correlates all per-range requests of this read-many call in diagnostics.
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Queries documents using a raw query string; delegates to the SqlQuerySpec
 * overload with a parameter-less spec.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, spec, options);
}
/**
 * Adapts this client to the IDocumentQueryClient interface used by the query
 * execution pipeline, exposing its caches, retry policy and consistency levels,
 * and wiring the optional operation listener around query execution.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // The account's default consistency as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // The (possibly weaker) consistency requested on this client instance.
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Notify the listener around the request, correlating via the activity id header.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);
                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // Not supported by this adapter.
            return null;
        }
    };
}
/**
 * Queries documents with a parameterized query spec, logging the query before
 * handing it to the generic query pipeline.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
                                                   CosmosQueryRequestOptions options) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Executes a change-feed query against the given collection using the
 * change-feed query pipeline.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<Document>(
        this,
        ResourceType.Document,
        Document.class,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
/**
 * Reads all documents belonging to a single logical partition.
 *
 * Resolves the collection, derives a partition-key-scan query from the collection's
 * PK definition, maps the partition key to its owning physical partition via the
 * routing map, and then executes the query pinned to that partition-key-range id.
 * The whole pipeline is wrapped in an InvalidPartitionExceptionRetryPolicy so a
 * stale collection/partition mapping triggers a refresh-and-retry.
 */
@Override
public Flux<FeedResponse<Document>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    // Dummy Query request only used as the key for collection-cache resolution below.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
        );
    // Resolve the collection metadata (PK definition, resource id) for this link.
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        // Build a "SELECT * WHERE <pk path> = <value>"-style logical-partition scan query.
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = Utils.randomUUID();
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
        // Copy the caller's options so the partition-key-range id can be set without
        // mutating the caller-supplied object.
        final CosmosQueryRequestOptions effectiveOptions =
            ModelBridgeInternal.createQueryRequestOptions(options);
        // Retry policy that refreshes the collection cache on "invalid partition" errors.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> {
            // Look up the routing map so the logical partition key can be mapped
            // to the physical partition-key range that owns it.
            Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                .tryLookupAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null).flux();
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                // Effective PK string locates the owning range in the routing map.
                String effectivePartitionKeyString = PartitionKeyInternalHelper
                    .getEffectivePartitionKeyString(
                        BridgeInternal.getPartitionKeyInternal(partitionKey),
                        pkDefinition);
                PartitionKeyRange range =
                    routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                // Execute the scan query pinned to that single range.
                return createQueryInternal(
                    resourceLink,
                    querySpec,
                    ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                    Document.class,
                    ResourceType.Document,
                    queryClient,
                    activityId);
            });
        },
        invalidPartitionExceptionRetryPolicy);
    });
}
/** Exposes the client-side cache of partitioned query execution plans. */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return this.queryPlanCache;
}
/** Reads the partition-key-range feed of a collection. */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed lives under <collection>/pkranges.
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds a service request for a stored-procedure create/upsert style operation
 * targeting the collection's sprocs feed.
 *
 * @throws IllegalArgumentException when the link is empty or the sproc is null
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    // Address the sprocs feed of the collection and assemble the request in one step.
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Builds a service request for a user-defined-function create/upsert style operation
 * targeting the collection's UDF feed.
 *
 * @throws IllegalArgumentException when the link is empty or the UDF is null
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    // Address the UDF feed of the collection and assemble the request in one step.
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
/** Creates a stored procedure, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a stored-procedure create; synchronous failures are turned
 * into an error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        // Let the retry policy stamp the request (e.g. routing hints) before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts a stored procedure, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a stored-procedure upsert; synchronous failures are turned
 * into an error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces an existing stored procedure, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a stored-procedure replace, addressed via the sproc's self-link.
 * Synchronous failures become an error Mono for uniform handling by the retry wrapper.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        // Replace targets the existing resource directly through its self-link.
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes a stored procedure by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a stored-procedure delete; synchronous failures are turned
 * into an error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads a stored procedure by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a stored-procedure read; synchronous failures are turned
 * into an error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the stored-procedure feed of a collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed lives under <collection>/sprocs.
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/** Queries stored procedures using a raw query string. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 CosmosQueryRequestOptions options) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, options);
}
/** Queries stored procedures using a parameterized query spec. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Delegate to the shared query pipeline, typed for StoredProcedure resources.
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure with default request options. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            List<Object> procedureParams) {
    // Null options -> defaults; reuse the full overload.
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/** Executes a stored procedure, retried under a fresh session-token-reset policy. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/** Executes a transactional batch request, retried under a fresh session-token-reset policy. */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Single attempt of a stored-procedure execution (ExecuteJavaScript).
 * Serializes the parameters into the request body, enriches the request with
 * partition-key information, and captures the session token from the response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        // Body is the JSON-serialized parameter array, or empty when there are no params.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda ignores `req` and reuses the outer `request`; this is
        // only correct if addPartitionKeyInformation emits the same (mutated) instance —
        // confirm against its implementation.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                // Persist the session token so subsequent session-consistent reads see this write.
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Single attempt of a transactional batch execution: builds the batch document
 * request, sends it as a create, and parses the service response back into a
 * CosmosBatchResponse. Synchronous failures become an error Mono.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Creates a trigger, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a trigger create; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts a trigger, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a trigger upsert; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Upsert);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a service request for a trigger create/upsert style operation
 * targeting the collection's triggers feed.
 *
 * @throws IllegalArgumentException when the link is empty or the trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    // Address the triggers feed of the collection and assemble the request in one step.
    final String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
        trigger, headers, options);
}
/** Replaces an existing trigger, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a trigger replace, addressed via the trigger's self-link.
 * Synchronous failures become an error Mono for uniform handling by the retry wrapper.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        // Replace targets the existing resource directly through its self-link.
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes a trigger by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a trigger delete; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads a trigger by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a trigger read; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the trigger feed of a collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed lives under <collection>/triggers.
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
/** Queries triggers using a raw query string. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 CosmosQueryRequestOptions options) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, options);
}
/** Queries triggers using a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 CosmosQueryRequestOptions options) {
    // Delegate to the shared query pipeline, typed for Trigger resources.
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/** Creates a user-defined function, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a UDF create; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts a user-defined function, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a UDF upsert; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Upsert);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces an existing user-defined function, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a UDF replace, addressed via the UDF's self-link.
 * Synchronous failures become an error Mono for uniform handling by the retry wrapper.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        // Replace targets the existing resource directly through its self-link.
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes a user-defined function by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a UDF delete; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads a user-defined function by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt of a UDF read; synchronous failures are turned into an
 * error Mono so the caller's retry wrapper sees them uniformly.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        // Let the retry policy stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the user-defined-function feed of a collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed lives under <collection>/udfs.
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/** Queries user-defined functions using a raw query string. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         String query, CosmosQueryRequestOptions options) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, options);
}
/** Queries user-defined functions using a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Delegate to the shared query pipeline, typed for UserDefinedFunction resources.
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads a conflict by link, retried under a fresh session-token-reset policy. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // One retry-policy instance per logical operation.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Reads a single Conflict resource by its link. Validation failures inside the try
// block surface as Mono.error rather than a synchronous throw.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        // Partition-key headers are resolved asynchronously before the read is issued.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the lambda parameter 'req' is ignored and the outer 'request'
            // is used instead — presumably addPartitionKeyInformation emits the same
            // (mutated) instance; confirm before relying on that equivalence.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) {
    // Feed-reads all Conflict resources under the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedLink);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   CosmosQueryRequestOptions options) {
    // Promote the raw query string to a SqlQuerySpec and reuse the spec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   CosmosQueryRequestOptions options) {
    // Conflict queries go through the shared createQuery pipeline.
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // The same retry-policy instance is shared between the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy), retryPolicy);
}
// Deletes a Conflict resource by its link. Argument errors surface via Mono.error.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        // Partition-key headers are resolved asynchronously before issuing the delete.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the lambda parameter 'req' is ignored in favor of the outer
            // 'request' — presumably addPartitionKeyInformation emits the same (mutated)
            // instance; confirm before relying on that equivalence.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // One retry-policy instance serves both the operation body and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy), retryPolicy);
}
// Creates a User under the given database link. The request is built (and thereby
// validated by getUserRequest) BEFORE user.getId() is dereferenced for logging, so a
// null user now fails with IllegalArgumentException("user") instead of an NPE.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy), retryPolicy);
}
// Upserts a User under the given database link. The request is built (and validated by
// getUserRequest) BEFORE user.getId() is dereferenced, so a null user now fails with
// IllegalArgumentException("user") instead of a NullPointerException from the log call.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Validates the inputs and assembles a User service request targeting
// <databaseLink>/users for the given operation type.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // One retry-policy instance drives both the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy), retryPolicy);
}
// Replaces a User addressed by its self link; errors surface via Mono.error.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Deletes a User by link. Added the missing @Override: every sibling interface method
// here (readUser, replaceUser, deleteConflict, ...) carries it; its absence was an
// inconsistency that hides accidental signature drift from the compiler.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Deletes the User addressed by userLink; validation failures surface via Mono.error.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String resourcePath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy), retryPolicy);
}
// Reads a single User by link; validation failures surface via Mono.error.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String resourcePath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) {
    // Feed-reads all Users under the given database.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, feedLink);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) {
    // Promote the raw query string to a SqlQuerySpec and reuse the spec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           CosmosQueryRequestOptions options) {
    // User queries go through the shared createQuery pipeline.
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    // One retry-policy instance drives both the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy), retryPolicy);
}
// Reads a single client encryption key by link; errors surface via Mono.error.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String resourcePath = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
// Creates a client encryption key under the given database. The request is built (and
// validated by getClientEncryptionKeyRequest) BEFORE clientEncryptionKey.getId() is
// dereferenced, so a null key now fails with IllegalArgumentException instead of an NPE.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Validates the inputs and assembles a ClientEncryptionKey request targeting
// <databaseLink>/clientencryptionkeys for the given operation type.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String resourcePath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    // One retry-policy instance drives both the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
// Replaces a client encryption key addressed by its name-based link; errors surface
// via Mono.error. NOTE(review): nameBasedLink itself is not null/empty-checked here —
// presumably validated by callers; confirm before tightening.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String resourcePath = Utils.joinPath(nameBasedLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) {
    // Feed-reads all client encryption keys under the given database.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedLink);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query,
                                                                         CosmosQueryRequestOptions options) {
    // Promote the raw query string to a SqlQuerySpec and reuse the spec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryClientEncryptionKeys(databaseLink, spec, options);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Client-encryption-key queries go through the shared createQuery pipeline.
    return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class,
        ResourceType.ClientEncryptionKey);
}
// Creates a Permission under the given user link.
// FIX: the retry wrapper previously received a SECOND, freshly-created retry policy
// (this.resetSessionTokenRetryPolicy.getRequestPolicy()) while the operation body used
// 'documentClientRetryPolicy'. Every sibling method shares one instance so the policy
// that observes the request is the one driving retries; this now does the same.
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Creates a Permission under the given user link. The request is built (and validated
// by getPermissionRequest) BEFORE permission.getId() is dereferenced, so a null
// permission now fails with IllegalArgumentException instead of an NPE from the log.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy), retryPolicy);
}
// Upserts a Permission under the given user link. The request is built (and validated
// by getPermissionRequest) BEFORE permission.getId() is dereferenced, so a null
// permission now fails with IllegalArgumentException instead of an NPE from the log.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Validates the inputs and assembles a Permission request targeting
// <userLink>/permissions for the given operation type.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String resourcePath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, resourcePath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // One retry-policy instance drives both the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy), retryPolicy);
}
// Replaces a Permission addressed by its self link; errors surface via Mono.error.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String resourcePath = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, resourcePath, permission, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy), retryPolicy);
}
// Deletes a Permission by link; validation failures surface via Mono.error.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String resourcePath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // One retry-policy instance drives both the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy), retryPolicy);
}
// Reads a single Permission by link; validation failures surface via Mono.error.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String resourcePath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) {
    // Feed-reads all Permissions under the given user.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, feedLink);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       CosmosQueryRequestOptions options) {
    // Promote the raw query string to a SqlQuerySpec and reuse the spec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryPermissions(userLink, spec, options);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       CosmosQueryRequestOptions options) {
    // Permission queries go through the shared createQuery pipeline.
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy), retryPolicy);
}
// Replaces an Offer addressed by its self link. Offers carry no request headers or
// options, so both are passed as null; errors surface via Mono.error.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String resourcePath = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, resourcePath, offer, null, null);
        return this.replace(serviceRequest, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // One retry-policy instance drives both the attempt and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy), retryPolicy);
}
// Reads a single Offer by link. The (HashMap<String,String>) null cast selects the
// headers-based create(...) overload; errors surface via Mono.error.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String resourcePath = Utils.joinPath(offerLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, resourcePath, (HashMap<String, String>) null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) {
    // Offers live at the account level, so the feed link is just the offers segment.
    String feedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, feedLink);
}
// Generic paged feed-read over any resource type. Builds a continuation-token-driven
// request factory and an execute function, then hands both to the Paginator.
// A single retry policy instance is shared across all pages of this feed.
private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
    if (options == null) {
        options = new CosmosQueryRequestOptions();
    }
    // -1 means "no page-size limit" for the paginator.
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    // Effectively-final copy so the lambda below can capture the (possibly replaced) options.
    final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options;
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Builds one page request; the continuation header is only set after the first page.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    // Each page execution is wrapped in the shared retry policy.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper
        .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
            retryPolicy);
    return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) {
    // Promote the raw query string to a SqlQuerySpec and reuse the spec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, options);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) {
    // Offers are account-scoped, so no parent resource link is supplied.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Shared retry-policy instance for both the attempt and the retry loop.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy), retryPolicy);
}
// Returns the most recently fetched database account snapshot held by the
// global endpoint manager (no network call).
@Override
public DatabaseAccount getLatestDatabaseAccount() {
    return this.globalEndpointManager.getLatestDatabaseAccount();
}
// Reads the account-level DatabaseAccount resource (empty resource path). The
// (HashMap<String,String>) null cast selects the headers-based create(...) overload.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(serviceRequest, documentClientRetryPolicy)
            .map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the session container as an opaque Object (callers cast as needed).
public Object getSession() {
    return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer or the
// cast throws ClassCastException.
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
// Accessor for the client-side collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
// Accessor for the partition-key-range metadata cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
// Fetches the DatabaseAccount directly from a specific endpoint (bypassing endpoint
// selection) via the gateway proxy. Side effect: on success it recomputes
// useMultipleWriteLocations from the connection policy and the returned account.
// Failures are logged at WARN and propagate to the subscriber.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    // Flux.defer so the request is rebuilt on every subscription (e.g. on retry).
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeaders(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Force the request to the caller-supplied endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Selects the transport for a request. Certain requests must be routed through the
 * gateway even when the client connectivity mode is direct; everything else goes to
 * the direct store model.
 *
 * @param request the request being dispatched
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Resource types that are always gateway-served in direct mode.
    boolean alwaysGateway =
        resourceType == ResourceType.Offer
            || resourceType == ResourceType.ClientEncryptionKey
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
            || resourceType == ResourceType.PartitionKeyRange
            || (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete);
    if (alwaysGateway) {
        return this.gatewayProxy;
    }
    switch (operationType) {
        case Create:
        case Upsert:
            // Account-metadata resources are created/upserted through the gateway.
            return (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission) ? this.gatewayProxy : this.storeModel;
        case Delete:
            return (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection) ? this.gatewayProxy : this.storeModel;
        case Replace:
        case Read:
            return resourceType == ResourceType.DocumentCollection ? this.gatewayProxy : this.storeModel;
        default:
            // Cross-partition feed operations without a resolved partition target
            // must fan out via the gateway.
            if ((operationType == OperationType.Query
                || operationType == OperationType.SqlQuery
                || operationType == OperationType.ReadFeed)
                && Utils.isCollectionChild(request.getResourceType())
                && request.getPartitionKeyRangeIdentity() == null
                && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
/**
 * Idempotent shutdown: the first call closes all owned components; subsequent
 * calls only log a warning. Components are closed quietly so one failure does
 * not prevent the rest from shutting down.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // The throughput control store only exists if a group was ever enabled.
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
/** Returns the deserializer used to materialize items from service payloads. */
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
/**
 * Registers a throughput control group, lazily creating and wiring the shared
 * ThroughputControlStore on first use. Synchronized so initialization and
 * registration are not interleaved across threads.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) {
    checkNotNull(group, "Throughput control group can not be null");
    // First registration flips the flag and builds the store exactly once.
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        this.storeModel.enableThroughputControl(throughputControlStore);
    }
    this.throughputControlStore.enableThroughputControlGroup(group);
}
/**
 * Builds the parameterized SQL spec used to scan a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Lists the feed ranges (one per physical partition) of a collection, retrying
 * through an InvalidPartitionException policy so stale name-cache entries are
 * refreshed transparently.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection, then maps its full set of overlapping partition key
 * ranges to feed ranges.
 *
 * @throws IllegalArgumentException when collectionLink is empty
 * @throws IllegalStateException when the collection cannot be resolved
 */
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
    logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            throw new IllegalStateException("Collection cannot be null");
        }
        // Query the range spanning all partition key ranges (forceRefresh=true).
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
/**
 * Converts the cached partition key ranges to feed ranges. A null list means the
 * name cache is stale: force a refresh and signal InvalidPartitionException so
 * the retry policy re-resolves the collection.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/** Wraps a partition key range's EPK range as a FeedRange. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
} |
looks like we are setting timeSinceEnqueued twice? if attributes.get(KAFKA_RECORD_QUEUE_TIME_MS) is not null, the code is overwriting the value. Does KAFKA_RECORD_QUEUE_TIME_MS have higher precedence? | private void export(SpanData span, List<TelemetryItem> telemetryItems) {
SpanKind kind = span.getKind();
String instrumentationName = span.getInstrumentationLibraryInfo().getName();
if (kind == SpanKind.INTERNAL) {
if (instrumentationName.startsWith("io.opentelemetry.spring-scheduling-")
&& !span.getParentSpanContext().isValid()) {
exportRequest(span, telemetryItems);
} else {
exportRemoteDependency(span, true, telemetryItems);
}
} else if (kind == SpanKind.CLIENT || kind == SpanKind.PRODUCER) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.CONSUMER
&& "receive".equals(span.getAttributes().get(SemanticAttributes.MESSAGING_OPERATION))) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.SERVER || kind == SpanKind.CONSUMER) {
exportRequest(span, telemetryItems);
} else {
throw LOGGER.logExceptionAsError(new UnsupportedOperationException(kind.name()));
}
}
/**
 * Maps a CLIENT/PRODUCER/internal span to a RemoteDependency telemetry item and
 * appends it (plus any span events) to {@code telemetryItems}.
 *
 * @param span the finished OpenTelemetry span
 * @param inProc true for INTERNAL spans, which are always typed "InProc"
 * @param telemetryItems collector receiving the produced telemetry
 */
private void exportRemoteDependency(SpanData span, boolean inProc,
    List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    RemoteDependencyData data = new RemoteDependencyData();
    initTelemetry(telemetry, data, "RemoteDependency", "RemoteDependencyData");
    data.setProperties(new HashMap<>());
    // (removed unused local 'samplingPercentage' — it was declared but never read)
    setOperationTags(telemetry, span);
    setTime(telemetry, span.getStartEpochNanos());
    setExtraAttributes(telemetry, data.getProperties(), span.getAttributes());
    addLinks(data.getProperties(), span.getLinks());
    data.setId(span.getSpanId());
    data.setName(getDependencyName(span));
    data.setDuration(
        FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos()));
    data.setSuccess(getSuccess(span));
    if (inProc) {
        data.setType("InProc");
    } else {
        // Derive type/target/data from OpenTelemetry semantic conventions.
        applySemanticConventions(span, data);
    }
    telemetryItems.add(telemetry);
    // Span events become Message/Exception telemetry correlated to this dependency.
    exportEvents(span, null, telemetryItems);
}
// Default span names produced by HTTP client instrumentation when no route is known;
// only spans with one of these names get rewritten to "<METHOD> <path>" in getDependencyName.
private static final Set<String> DEFAULT_HTTP_SPAN_NAMES =
    new HashSet<>(
        Arrays.asList(
            "HTTP OPTIONS",
            "HTTP GET",
            "HTTP HEAD",
            "HTTP POST",
            "HTTP PUT",
            "HTTP DELETE",
            "HTTP TRACE",
            "HTTP CONNECT",
            "HTTP PATCH"));
/**
 * Dependency display name: "<METHOD> <path>" for default-named HTTP client spans
 * with a parseable URL, otherwise the raw span name.
 */
private static String getDependencyName(SpanData span) {
    String spanName = span.getName();
    String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
    if (httpMethod == null || !DEFAULT_HTTP_SPAN_NAMES.contains(spanName)) {
        return spanName;
    }
    String httpUrl = span.getAttributes().get(SemanticAttributes.HTTP_URL);
    if (httpUrl == null) {
        return spanName;
    }
    String path = UrlParser.getPathFromUrl(httpUrl);
    if (path == null) {
        return spanName;
    }
    if (path.isEmpty()) {
        return httpMethod + " /";
    }
    return httpMethod + " " + path;
}
/**
 * Fills type/target/data of a dependency from OpenTelemetry semantic-convention
 * attributes. Checked in precedence order: HTTP, RPC, database, Azure messaging
 * namespaces, generic messaging, peer attributes; falls back to "InProc".
 */
private static void applySemanticConventions(SpanData span, RemoteDependencyData remoteDependencyData) {
    Attributes attributes = span.getAttributes();
    String httpMethod = attributes.get(SemanticAttributes.HTTP_METHOD);
    if (httpMethod != null) {
        applyHttpClientSpan(attributes, remoteDependencyData);
        return;
    }
    String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM);
    if (rpcSystem != null) {
        applyRpcClientSpan(attributes, remoteDependencyData, rpcSystem);
        return;
    }
    String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM);
    if (dbSystem != null) {
        applyDatabaseClientSpan(attributes, remoteDependencyData, dbSystem);
        return;
    }
    // Azure SDK spans carry an explicit namespace attribute.
    String azureNamespace = attributes.get(AZURE_NAMESPACE);
    if ("Microsoft.EventHub".equals(azureNamespace)) {
        applyEventHubsSpan(attributes, remoteDependencyData);
        return;
    }
    if ("Microsoft.ServiceBus".equals(azureNamespace)) {
        applyServiceBusSpan(attributes, remoteDependencyData);
        return;
    }
    String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
    if (messagingSystem != null) {
        applyMessagingClientSpan(attributes, remoteDependencyData, messagingSystem, span.getKind());
        return;
    }
    // Any port counts as non-default here, so the target always includes it.
    String target = getTargetFromPeerAttributes(attributes, Integer.MAX_VALUE);
    if (target != null) {
        remoteDependencyData.setTarget(target);
        return;
    }
    remoteDependencyData.setType("InProc");
}
/** Stamps operation id / parent id / name context tags from the span. */
private static void setOperationTags(TelemetryItem telemetry, SpanData span) {
    setOperationId(telemetry, span.getTraceId());
    setOperationParentId(telemetry, span.getParentSpanContext().getSpanId());
    setOperationName(telemetry, span.getAttributes());
}
/** The trace id becomes the Application Insights operation id. */
private static void setOperationId(TelemetryItem telemetry, String traceId) {
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), traceId);
}
/** Sets the parent operation id, skipping invalid (e.g. all-zero root) span ids. */
private static void setOperationParentId(TelemetryItem telemetry, String parentSpanId) {
    if (SpanId.isValid(parentSpanId)) {
        telemetry.getTags().put(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId);
    }
}
/** Copies the operation name from the span attribute, when present. */
private static void setOperationName(TelemetryItem telemetry, Attributes attributes) {
    String operationName = attributes.get(AI_OPERATION_NAME_KEY);
    if (operationName != null) {
        setOperationName(telemetry, operationName);
    }
}
/** Writes the operation-name context tag. */
private static void setOperationName(TelemetryItem telemetry, String operationName) {
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
}
/** HTTP client dependency: type "Http", target host, status code result, full URL data. */
private static void applyHttpClientSpan(Attributes attributes, RemoteDependencyData telemetry) {
    String target = getTargetForHttpClientSpan(attributes);
    telemetry.setType("Http");
    telemetry.setTarget(target);
    Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
    if (httpStatusCode != null) {
        telemetry.setResultCode(Long.toString(httpStatusCode));
    }
    // May be null when the instrumentation did not record http.url.
    String url = attributes.get(SemanticAttributes.HTTP_URL);
    telemetry.setData(url);
}
/**
 * Resolves the dependency target for an HTTP client span, trying in order:
 * peer.service, http.host (stripping default ports), the host of http.url,
 * then net.* attributes; "Http" is the last-resort placeholder.
 */
private static String getTargetForHttpClientSpan(Attributes attributes) {
    String target = getTargetFromPeerService(attributes);
    if (target != null) {
        return target;
    }
    target = attributes.get(SemanticAttributes.HTTP_HOST);
    if (target != null) {
        // Drop the default port suffix so "host:80"/"host:443" normalize to "host".
        String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
        if ("http".equals(scheme)) {
            if (target.endsWith(":80")) {
                target = target.substring(0, target.length() - 3);
            }
        } else if ("https".equals(scheme)) {
            if (target.endsWith(":443")) {
                target = target.substring(0, target.length() - 4);
            }
        }
        return target;
    }
    String url = attributes.get(SemanticAttributes.HTTP_URL);
    if (url != null) {
        target = UrlParser.getTargetFromUrl(url);
        if (target != null) {
            return target;
        }
    }
    // net.* fallback: suppress the port only when it matches the scheme's default.
    String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
    int defaultPort;
    if ("http".equals(scheme)) {
        defaultPort = 80;
    } else if ("https".equals(scheme)) {
        defaultPort = 443;
    } else {
        defaultPort = 0;
    }
    target = getTargetFromNetAttributes(attributes, defaultPort);
    if (target != null) {
        return target;
    }
    return "Http";
}
/** Target resolution: peer.service wins; otherwise fall back to net.* attributes. */
@Nullable
private static String getTargetFromPeerAttributes(Attributes attributes, int defaultPort) {
    String peerService = getTargetFromPeerService(attributes);
    return peerService != null ? peerService : getTargetFromNetAttributes(attributes, defaultPort);
}
/** Returns the user-configured peer.service attribute, or null when absent. */
@Nullable
private static String getTargetFromPeerService(Attributes attributes) {
    return attributes.get(SemanticAttributes.PEER_SERVICE);
}
/**
 * Builds "host" or "host:port" from net.* attributes; the port is appended only
 * when present and different from {@code defaultPort}. Null when no host is known.
 */
@Nullable
private static String getTargetFromNetAttributes(Attributes attributes, int defaultPort) {
    String host = getHostFromNetAttributes(attributes);
    if (host == null) {
        return null;
    }
    Long peerPort = attributes.get(SemanticAttributes.NET_PEER_PORT);
    boolean appendPort = peerPort != null && peerPort != defaultPort;
    return appendPort ? host + ":" + peerPort : host;
}
/** Host for the peer: net.peer.name preferred, net.peer.ip as fallback. */
@Nullable
private static String getHostFromNetAttributes(Attributes attributes) {
    String peerName = attributes.get(SemanticAttributes.NET_PEER_NAME);
    return peerName != null ? peerName : attributes.get(SemanticAttributes.NET_PEER_IP);
}
/** RPC dependency: the rpc.system doubles as the target when no peer is known. */
private static void applyRpcClientSpan(Attributes attributes, RemoteDependencyData telemetry,
    String rpcSystem) {
    telemetry.setType(rpcSystem);
    String target = getTargetFromPeerAttributes(attributes, 0);
    telemetry.setTarget(target == null ? rpcSystem : target);
}
/**
 * Database dependency: SQL-family systems map to "mysql"/"postgresql"/"SQL"; other
 * systems keep their raw name. Target is "peer | dbName" when both are available.
 */
private static void applyDatabaseClientSpan(Attributes attributes, RemoteDependencyData telemetry,
    String dbSystem) {
    String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT);
    String type;
    if (SQL_DB_SYSTEMS.contains(dbSystem)) {
        if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) {
            type = "mysql"; // this has special icon in portal
        } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) {
            type = "postgresql"; // this has special icon in portal
        } else {
            type = "SQL";
        }
    } else {
        type = dbSystem;
    }
    telemetry.setType(type);
    telemetry.setData(dbStatement);
    // Suppress the port when it is the system's well-known default.
    String target =
        nullAwareConcat(
            getTargetFromPeerAttributes(attributes, getDefaultPortForDbSystem(dbSystem)),
            attributes.get(SemanticAttributes.DB_NAME),
            " | ");
    if (target == null) {
        target = dbSystem;
    }
    telemetry.setTarget(target);
}
/** Messaging dependency: PRODUCER spans are typed as queue-message sends. */
private static void applyMessagingClientSpan(Attributes attributes, RemoteDependencyData telemetry,
    String messagingSystem, SpanKind spanKind) {
    String type = spanKind == SpanKind.PRODUCER
        ? "Queue Message | " + messagingSystem
        : messagingSystem;
    telemetry.setType(type);
    String destination = attributes.get(SemanticAttributes.MESSAGING_DESTINATION);
    telemetry.setTarget(destination == null ? messagingSystem : destination);
}
/** Event Hubs dependency: fixed type, "peer/destination" target. */
private static void applyEventHubsSpan(Attributes attributes, RemoteDependencyData telemetry) {
    telemetry.setType("Microsoft.EventHub");
    telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
/** Service Bus dependency: fixed type, "peer/destination" target. */
private static void applyServiceBusSpan(Attributes attributes, RemoteDependencyData telemetry) {
    telemetry.setType("AZURE SERVICE BUS");
    telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
/**
 * "peerAddress/destination" for Azure SDK messaging spans.
 * NOTE(review): if either attribute is absent this produces the literal "null" in the
 * result (e.g. "null/null") — presumably both are always set by the SDK; confirm.
 */
private static String getAzureSdkTargetSource(Attributes attributes) {
    String peerAddress = attributes.get(AZURE_SDK_PEER_ADDRESS);
    String destination = attributes.get(AZURE_SDK_MESSAGE_BUS_DESTINATION);
    return peerAddress + "/" + destination;
}
/**
 * Well-known default port per database system; used to omit the ":port" suffix
 * from dependency targets. Returns 0 (never a real port) for unknown systems.
 */
private static int getDefaultPortForDbSystem(String dbSystem) {
    switch (dbSystem) {
        case SemanticAttributes.DbSystemValues.MONGODB:
            return 27017;
        case SemanticAttributes.DbSystemValues.CASSANDRA:
            return 9042;
        case SemanticAttributes.DbSystemValues.REDIS:
            return 6379;
        case SemanticAttributes.DbSystemValues.MARIADB:
        case SemanticAttributes.DbSystemValues.MYSQL:
            return 3306;
        case SemanticAttributes.DbSystemValues.MSSQL:
            return 1433;
        case SemanticAttributes.DbSystemValues.DB2:
            return 50000;
        case SemanticAttributes.DbSystemValues.ORACLE:
            return 1521;
        case SemanticAttributes.DbSystemValues.H2:
            return 8082;
        case SemanticAttributes.DbSystemValues.DERBY:
            return 1527;
        case SemanticAttributes.DbSystemValues.POSTGRESQL:
            return 5432;
        default:
            return 0;
    }
}
/**
 * Maps a SERVER/CONSUMER span to a Request telemetry item and appends it (plus any
 * span events) to {@code telemetryItems}.
 */
private void exportRequest(SpanData span, List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    RequestData data = new RequestData();
    initTelemetry(telemetry, data, "Request", "RequestData");
    data.setProperties(new HashMap<>());
    Attributes attributes = span.getAttributes();
    long startEpochNanos = span.getStartEpochNanos();
    // (removed unused local 'samplingPercentage' — it was declared but never read)
    data.setId(span.getSpanId());
    setTime(telemetry, startEpochNanos);
    setExtraAttributes(telemetry, data.getProperties(), attributes);
    addLinks(data.getProperties(), span.getLinks());
    String operationName = getOperationName(span);
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId());
    telemetry
        .getTags()
        .put(
            ContextTagKeys.AI_OPERATION_PARENT_ID.toString(),
            span.getParentSpanContext().getSpanId());
    data.setName(operationName);
    data.setDuration(FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos));
    data.setSuccess(getSuccess(span));
    String httpUrl = getHttpUrlFromServerSpan(attributes);
    if (httpUrl != null) {
        data.setUrl(httpUrl);
    }
    // HTTP status wins; gRPC status is the fallback; "0" means unknown.
    Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
    if (httpStatusCode == null) {
        httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE);
    }
    if (httpStatusCode != null) {
        data.setResponseCode(Long.toString(httpStatusCode));
    } else {
        data.setResponseCode("0");
    }
    String locationIp = attributes.get(SemanticAttributes.HTTP_CLIENT_IP);
    if (locationIp == null) {
        // only use net.peer.ip if http.client_ip is not available
        locationIp = attributes.get(SemanticAttributes.NET_PEER_IP);
    }
    if (locationIp != null) {
        telemetry.getTags().put(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp);
    }
    data.setSource(getSource(attributes));
    // FIX: "timeSinceEnqueued" was previously written twice — the Azure SDK value was
    // computed first and then silently overwritten whenever KAFKA_RECORD_QUEUE_TIME_MS
    // was present. The two attributes should never coexist in practice; the helper
    // below makes the Kafka-wins precedence explicit and writes the measurement once.
    Double timeSinceEnqueuedMillis = computeTimeSinceEnqueuedMillis(attributes, startEpochNanos);
    if (timeSinceEnqueuedMillis != null) {
        if (data.getMeasurements() == null) {
            data.setMeasurements(new HashMap<>());
        }
        data.getMeasurements().put("timeSinceEnqueued", timeSinceEnqueuedMillis);
    }
    telemetryItems.add(telemetry);
    exportEvents(span, operationName, telemetryItems);
}

/**
 * Queue dwell time in milliseconds, or null when neither messaging attribute is set.
 * Kafka's KAFKA_RECORD_QUEUE_TIME_MS (already in ms) takes precedence over the Azure
 * SDK enqueued timestamp (epoch seconds), matching the original overwrite order.
 */
@Nullable
private static Double computeTimeSinceEnqueuedMillis(Attributes attributes, long startEpochNanos) {
    Long kafkaQueueTimeMs = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS);
    if (kafkaQueueTimeMs != null) {
        return (double) kafkaQueueTimeMs;
    }
    Long enqueuedTimeSeconds = attributes.get(AZURE_SDK_ENQUEUED_TIME);
    if (enqueuedTimeSeconds == null) {
        return null;
    }
    // Clock skew can make the delta negative; clamp at zero.
    return (double) Math.max(
        0L, NANOSECONDS.toMillis(startEpochNanos) - SECONDS.toMillis(enqueuedTimeSeconds));
}
/**
 * Success flag for a request/dependency: ERROR is failure, OK is success, and an
 * UNSET status falls back to the HTTP status code (missing or < 400 counts as success).
 */
private boolean getSuccess(SpanData span) {
    switch (span.getStatus().getStatusCode()) {
        case ERROR:
            return false;
        case OK:
            // instrumentation overrode the default OK status
            return true;
        case UNSET:
            Long statusCode = span.getAttributes().get(SemanticAttributes.HTTP_STATUS_CODE);
            return statusCode == null || statusCode < 400;
        default:
            return true;
    }
}
@Nullable
private static String getHttpUrlFromServerSpan(Attributes attributes) {
String httpUrl = attributes.get(SemanticAttributes.HTTP_URL);
if (httpUrl != null) {
return httpUrl;
}
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
if (scheme == null) {
return null;
}
String host = attributes.get(SemanticAttributes.HTTP_HOST);
if (host == null) {
return null;
}
String target = attributes.get(SemanticAttributes.HTTP_TARGET);
if (target == null) {
return null;
}
return scheme + ":
}
/**
 * Request source: "peer/destination" for Azure messaging spans, "target/destination"
 * (or the bare system name) for generic messaging, null for everything else.
 */
private static String getSource(Attributes attributes) {
    // Azure SDK spans are mapped from their own attributes.
    if (isAzureQueue(attributes)) {
        return getAzureSdkTargetSource(attributes);
    }
    String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
    if (messagingSystem != null) {
        String source =
            nullAwareConcat(
                getTargetFromPeerAttributes(attributes, 0),
                attributes.get(SemanticAttributes.MESSAGING_DESTINATION),
                "/");
        if (source != null) {
            return source;
        }
        // fallback
        return messagingSystem;
    }
    return null;
}
/** True when the span came from the Event Hubs or Service Bus Azure SDKs. */
private static boolean isAzureQueue(Attributes attributes) {
    String azureNamespace = attributes.get(AZURE_NAMESPACE);
    if ("Microsoft.EventHub".equals(azureNamespace)) {
        return true;
    }
    return "Microsoft.ServiceBus".equals(azureNamespace);
}
/** Operation name: "<METHOD> /path" when the span is named after an HTTP route, else the span name. */
private static String getOperationName(SpanData span) {
    String spanName = span.getName();
    String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
    if (httpMethod != null && !httpMethod.isEmpty() && spanName.startsWith("/")) {
        return httpMethod + " " + spanName;
    }
    return spanName;
}
/**
 * Joins two strings with {@code separator}; when either side is null, returns the
 * other side unchanged (which may itself be null).
 */
private static String nullAwareConcat(String str1, String str2, String separator) {
    if (str1 == null) {
        return str2;
    }
    return str2 == null ? str1 : str1 + separator + str2;
}
/**
 * Emits span events as Message telemetry; an event carrying exception attributes is
 * instead emitted as Exception telemetry.
 * NOTE(review): the early {@code return} on an exception event stops processing any
 * remaining events of the span — presumably intentional (one exception per span), confirm.
 */
private void exportEvents(SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) {
    for (EventData event : span.getEvents()) {
        if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null
            || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) {
            // Only events with a recorded stack trace produce Exception telemetry.
            String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE);
            if (stacktrace != null) {
                trackException(stacktrace, span, operationName, telemetryItems);
            }
            return;
        }
        TelemetryItem telemetry = new TelemetryItem();
        MessageData data = new MessageData();
        initTelemetry(telemetry, data, "Message", "MessageData");
        data.setProperties(new HashMap<>());
        setOperationId(telemetry, span.getTraceId());
        // The owning span is the parent of its events.
        setOperationParentId(telemetry, span.getSpanId());
        if (operationName != null) {
            setOperationName(telemetry, operationName);
        } else {
            setOperationName(telemetry, span.getAttributes());
        }
        setTime(telemetry, event.getEpochNanos());
        setExtraAttributes(telemetry, data.getProperties(), event.getAttributes());
        data.setMessage(event.getName());
        telemetryItems.add(telemetry);
    }
}
/** Emits Exception telemetry parsed from a recorded stack trace, correlated to the span. */
private void trackException(String errorStack, SpanData span, @Nullable String operationName,
    List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    TelemetryExceptionData data = new TelemetryExceptionData();
    initTelemetry(telemetry, data, "Exception", "ExceptionData");
    data.setProperties(new HashMap<>());
    setOperationId(telemetry, span.getTraceId());
    // The owning span is the parent of the exception.
    setOperationParentId(telemetry, span.getSpanId());
    if (operationName != null) {
        setOperationName(telemetry, operationName);
    } else {
        setOperationName(telemetry, span.getAttributes());
    }
    setTime(telemetry, span.getEndEpochNanos());
    data.setExceptions(Exceptions.minimalParse(errorStack));
    telemetryItems.add(telemetry);
}
/**
 * Common envelope setup: version, name, instrumentation key, SDK-version tag, and
 * wiring of the typed payload into the MonitorBase wrapper.
 */
private void initTelemetry(TelemetryItem telemetry, MonitorDomain data, String telemetryName,
    String baseType) {
    telemetry.setVersion(1);
    telemetry.setName(telemetryName);
    telemetry.setInstrumentationKey(instrumentationKey);
    telemetry.setTags(new HashMap<>());
    telemetry.getTags().put(ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
    data.setVersion(2);
    MonitorBase monitorBase = new MonitorBase();
    telemetry.setData(monitorBase);
    monitorBase.setBaseType(baseType);
    monitorBase.setBaseData(data);
}
/** Converts epoch nanos to the telemetry timestamp format. */
private static void setTime(TelemetryItem telemetry, long epochNanos) {
    telemetry.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos));
}
/**
 * Serializes span links as a compact JSON array of {operation_Id, id} objects into
 * the "_MS.links" property. No property is written when there are no links.
 */
private static void addLinks(Map<String, String> properties, List<LinkData> links) {
    if (links.isEmpty()) {
        return;
    }
    StringBuilder json = new StringBuilder("[");
    String separator = "";
    for (LinkData link : links) {
        json.append(separator)
            .append("{\"operation_Id\":\"")
            .append(link.getSpanContext().getTraceId())
            .append("\",\"id\":\"")
            .append(link.getSpanContext().getSpanId())
            .append("\"}");
        separator = ",";
    }
    json.append("]");
    properties.put("_MS.links", json.toString());
}
/**
 * Copies non-standard span attributes into custom properties. Azure SDK and Kafka
 * queue attributes are skipped (mapped elsewhere); enduser.id and http.user_agent
 * map to context tags; attributes with a standard prefix are dropped.
 */
private static void setExtraAttributes(TelemetryItem telemetry, Map<String, String> properties,
    Attributes attributes) {
    attributes.forEach((key, value) -> {
        String stringKey = key.getKey();
        // Azure SDK messaging attributes are mapped to dedicated fields elsewhere.
        if (stringKey.equals(AZURE_NAMESPACE.getKey())
            || stringKey.equals(AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey())
            || stringKey.equals(AZURE_SDK_ENQUEUED_TIME.getKey())) {
            return;
        }
        // Kafka queue metrics become measurements, not properties.
        if (stringKey.equals(KAFKA_RECORD_QUEUE_TIME_MS.getKey())
            || stringKey.equals(KAFKA_OFFSET.getKey())) {
            return;
        }
        if (stringKey.equals(SemanticAttributes.ENDUSER_ID.getKey()) && value instanceof String) {
            telemetry.getTags().put(ContextTagKeys.AI_USER_ID.toString(), (String) value);
            return;
        }
        if (stringKey.equals(SemanticAttributes.HTTP_USER_AGENT.getKey())
            && value instanceof String) {
            telemetry.getTags().put("ai.user.userAgent", (String) value);
            return;
        }
        int index = stringKey.indexOf(".");
        String prefix = index == -1 ? stringKey : stringKey.substring(0, index);
        if (STANDARD_ATTRIBUTE_PREFIXES.contains(prefix)) {
            return;
        }
        String val = convertToString(value, key.getType());
        // FIX: previously guarded on 'value != null', which is always true inside
        // forEach; that allowed a null 'val' (unexpected attribute type) to be
        // stored in the properties map. Guard on the converted value instead.
        if (val != null) {
            properties.put(key.getKey(), val);
        }
    });
}
/**
 * Renders an attribute value as a string; array types are joined with ", ".
 * Returns null (with a warning) for attribute types this exporter does not know.
 */
@Nullable
private static String convertToString(Object value, AttributeType type) {
    switch (type) {
        case STRING:
        case BOOLEAN:
        case LONG:
        case DOUBLE:
            return String.valueOf(value);
        case STRING_ARRAY:
        case BOOLEAN_ARRAY:
        case LONG_ARRAY:
        case DOUBLE_ARRAY:
            return join((List<?>) value);
        default:
            LOGGER.warning("unexpected attribute type: {}", type);
            return null;
    }
}
/** Joins list elements with ", " using their string representations. */
private static <T> String join(List<T> values) {
    StringBuilder out = new StringBuilder();
    String separator = "";
    for (T item : values) {
        out.append(separator).append(item);
        separator = ", ";
    }
    return out.toString();
}
} | data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); | private void export(SpanData span, List<TelemetryItem> telemetryItems) {
SpanKind kind = span.getKind();
String instrumentationName = span.getInstrumentationLibraryInfo().getName();
if (kind == SpanKind.INTERNAL) {
if (instrumentationName.startsWith("io.opentelemetry.spring-scheduling-")
&& !span.getParentSpanContext().isValid()) {
exportRequest(span, telemetryItems);
} else {
exportRemoteDependency(span, true, telemetryItems);
}
} else if (kind == SpanKind.CLIENT || kind == SpanKind.PRODUCER) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.CONSUMER
&& "receive".equals(span.getAttributes().get(SemanticAttributes.MESSAGING_OPERATION))) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.SERVER || kind == SpanKind.CONSUMER) {
exportRequest(span, telemetryItems);
} else {
throw LOGGER.logExceptionAsError(new UnsupportedOperationException(kind.name()));
}
}
/**
 * Maps a CLIENT/PRODUCER/internal span to a RemoteDependency telemetry item and
 * appends it (plus any span events) to {@code telemetryItems}.
 *
 * @param span the finished OpenTelemetry span
 * @param inProc true for INTERNAL spans, which are always typed "InProc"
 * @param telemetryItems collector receiving the produced telemetry
 */
private void exportRemoteDependency(SpanData span, boolean inProc,
    List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    RemoteDependencyData data = new RemoteDependencyData();
    initTelemetry(telemetry, data, "RemoteDependency", "RemoteDependencyData");
    data.setProperties(new HashMap<>());
    // (removed unused local 'samplingPercentage' — it was declared but never read)
    setOperationTags(telemetry, span);
    setTime(telemetry, span.getStartEpochNanos());
    setExtraAttributes(telemetry, data.getProperties(), span.getAttributes());
    addLinks(data.getProperties(), span.getLinks());
    data.setId(span.getSpanId());
    data.setName(getDependencyName(span));
    data.setDuration(
        FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos()));
    data.setSuccess(getSuccess(span));
    if (inProc) {
        data.setType("InProc");
    } else {
        // Derive type/target/data from OpenTelemetry semantic conventions.
        applySemanticConventions(span, data);
    }
    telemetryItems.add(telemetry);
    // Span events become Message/Exception telemetry correlated to this dependency.
    exportEvents(span, null, telemetryItems);
}
// Default span names produced by HTTP client instrumentation when no route is known;
// only spans with one of these names get rewritten to "<METHOD> <path>" in getDependencyName.
private static final Set<String> DEFAULT_HTTP_SPAN_NAMES =
    new HashSet<>(
        Arrays.asList(
            "HTTP OPTIONS",
            "HTTP GET",
            "HTTP HEAD",
            "HTTP POST",
            "HTTP PUT",
            "HTTP DELETE",
            "HTTP TRACE",
            "HTTP CONNECT",
            "HTTP PATCH"));
/**
 * Dependency display name: "<METHOD> <path>" for default-named HTTP client spans
 * with a parseable URL, otherwise the raw span name.
 */
private static String getDependencyName(SpanData span) {
    String spanName = span.getName();
    String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
    if (httpMethod == null || !DEFAULT_HTTP_SPAN_NAMES.contains(spanName)) {
        return spanName;
    }
    String httpUrl = span.getAttributes().get(SemanticAttributes.HTTP_URL);
    if (httpUrl == null) {
        return spanName;
    }
    String path = UrlParser.getPathFromUrl(httpUrl);
    if (path == null) {
        return spanName;
    }
    if (path.isEmpty()) {
        return httpMethod + " /";
    }
    return httpMethod + " " + path;
}
/**
 * Fills type/target/data of a dependency from OpenTelemetry semantic-convention
 * attributes. Checked in precedence order: HTTP, RPC, database, Azure messaging
 * namespaces, generic messaging, peer attributes; falls back to "InProc".
 */
private static void applySemanticConventions(SpanData span, RemoteDependencyData remoteDependencyData) {
    Attributes attributes = span.getAttributes();
    String httpMethod = attributes.get(SemanticAttributes.HTTP_METHOD);
    if (httpMethod != null) {
        applyHttpClientSpan(attributes, remoteDependencyData);
        return;
    }
    String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM);
    if (rpcSystem != null) {
        applyRpcClientSpan(attributes, remoteDependencyData, rpcSystem);
        return;
    }
    String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM);
    if (dbSystem != null) {
        applyDatabaseClientSpan(attributes, remoteDependencyData, dbSystem);
        return;
    }
    // Azure SDK spans carry an explicit namespace attribute.
    String azureNamespace = attributes.get(AZURE_NAMESPACE);
    if ("Microsoft.EventHub".equals(azureNamespace)) {
        applyEventHubsSpan(attributes, remoteDependencyData);
        return;
    }
    if ("Microsoft.ServiceBus".equals(azureNamespace)) {
        applyServiceBusSpan(attributes, remoteDependencyData);
        return;
    }
    String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
    if (messagingSystem != null) {
        applyMessagingClientSpan(attributes, remoteDependencyData, messagingSystem, span.getKind());
        return;
    }
    // Any port counts as non-default here, so the target always includes it.
    String target = getTargetFromPeerAttributes(attributes, Integer.MAX_VALUE);
    if (target != null) {
        remoteDependencyData.setTarget(target);
        return;
    }
    remoteDependencyData.setType("InProc");
}
/** Stamps operation id / parent id / name context tags from the span. */
private static void setOperationTags(TelemetryItem telemetry, SpanData span) {
    setOperationId(telemetry, span.getTraceId());
    setOperationParentId(telemetry, span.getParentSpanContext().getSpanId());
    setOperationName(telemetry, span.getAttributes());
}
/** The trace id becomes the Application Insights operation id. */
private static void setOperationId(TelemetryItem telemetry, String traceId) {
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), traceId);
}
/** Sets the parent operation id, skipping invalid (e.g. all-zero root) span ids. */
private static void setOperationParentId(TelemetryItem telemetry, String parentSpanId) {
    if (SpanId.isValid(parentSpanId)) {
        telemetry.getTags().put(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId);
    }
}
/** Copies the operation name from the span attribute, when present. */
private static void setOperationName(TelemetryItem telemetry, Attributes attributes) {
    String operationName = attributes.get(AI_OPERATION_NAME_KEY);
    if (operationName != null) {
        setOperationName(telemetry, operationName);
    }
}
/** Writes the operation-name context tag. */
private static void setOperationName(TelemetryItem telemetry, String operationName) {
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
}
/** HTTP client dependency: type "Http", target host, status code result, full URL data. */
private static void applyHttpClientSpan(Attributes attributes, RemoteDependencyData telemetry) {
    String target = getTargetForHttpClientSpan(attributes);
    telemetry.setType("Http");
    telemetry.setTarget(target);
    Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
    if (httpStatusCode != null) {
        telemetry.setResultCode(Long.toString(httpStatusCode));
    }
    // May be null when the instrumentation did not record http.url.
    String url = attributes.get(SemanticAttributes.HTTP_URL);
    telemetry.setData(url);
}
/**
 * Resolves the dependency target for an HTTP client span, trying in order:
 * peer.service, http.host (stripping default ports), the host of http.url,
 * then net.* attributes; "Http" is the last-resort placeholder.
 */
private static String getTargetForHttpClientSpan(Attributes attributes) {
    String target = getTargetFromPeerService(attributes);
    if (target != null) {
        return target;
    }
    target = attributes.get(SemanticAttributes.HTTP_HOST);
    if (target != null) {
        // Drop the default port suffix so "host:80"/"host:443" normalize to "host".
        String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
        if ("http".equals(scheme)) {
            if (target.endsWith(":80")) {
                target = target.substring(0, target.length() - 3);
            }
        } else if ("https".equals(scheme)) {
            if (target.endsWith(":443")) {
                target = target.substring(0, target.length() - 4);
            }
        }
        return target;
    }
    String url = attributes.get(SemanticAttributes.HTTP_URL);
    if (url != null) {
        target = UrlParser.getTargetFromUrl(url);
        if (target != null) {
            return target;
        }
    }
    // net.* fallback: suppress the port only when it matches the scheme's default.
    String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
    int defaultPort;
    if ("http".equals(scheme)) {
        defaultPort = 80;
    } else if ("https".equals(scheme)) {
        defaultPort = 443;
    } else {
        defaultPort = 0;
    }
    target = getTargetFromNetAttributes(attributes, defaultPort);
    if (target != null) {
        return target;
    }
    return "Http";
}
@Nullable
private static String getTargetFromPeerAttributes(Attributes attributes, int defaultPort) {
String target = getTargetFromPeerService(attributes);
if (target != null) {
return target;
}
return getTargetFromNetAttributes(attributes, defaultPort);
}
@Nullable
private static String getTargetFromPeerService(Attributes attributes) {
return attributes.get(SemanticAttributes.PEER_SERVICE);
}
@Nullable
private static String getTargetFromNetAttributes(Attributes attributes, int defaultPort) {
String target = getHostFromNetAttributes(attributes);
if (target == null) {
return null;
}
Long port = attributes.get(SemanticAttributes.NET_PEER_PORT);
if (port != null && port != defaultPort) {
return target + ":" + port;
}
return target;
}
@Nullable
private static String getHostFromNetAttributes(Attributes attributes) {
String host = attributes.get(SemanticAttributes.NET_PEER_NAME);
if (host != null) {
return host;
}
return attributes.get(SemanticAttributes.NET_PEER_IP);
}
private static void applyRpcClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String rpcSystem) {
telemetry.setType(rpcSystem);
String target = getTargetFromPeerAttributes(attributes, 0);
if (target == null) {
target = rpcSystem;
}
telemetry.setTarget(target);
}
private static void applyDatabaseClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String dbSystem) {
String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT);
String type;
if (SQL_DB_SYSTEMS.contains(dbSystem)) {
if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) {
type = "mysql";
} else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) {
type = "postgresql";
} else {
type = "SQL";
}
} else {
type = dbSystem;
}
telemetry.setType(type);
telemetry.setData(dbStatement);
String target =
nullAwareConcat(
getTargetFromPeerAttributes(attributes, getDefaultPortForDbSystem(dbSystem)),
attributes.get(SemanticAttributes.DB_NAME),
" | ");
if (target == null) {
target = dbSystem;
}
telemetry.setTarget(target);
}
private static void applyMessagingClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String messagingSystem, SpanKind spanKind) {
if (spanKind == SpanKind.PRODUCER) {
telemetry.setType("Queue Message | " + messagingSystem);
} else {
telemetry.setType(messagingSystem);
}
String destination = attributes.get(SemanticAttributes.MESSAGING_DESTINATION);
if (destination != null) {
telemetry.setTarget(destination);
} else {
telemetry.setTarget(messagingSystem);
}
}
private static void applyEventHubsSpan(Attributes attributes, RemoteDependencyData telemetry) {
telemetry.setType("Microsoft.EventHub");
telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
private static void applyServiceBusSpan(Attributes attributes, RemoteDependencyData telemetry) {
telemetry.setType("AZURE SERVICE BUS");
telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
private static String getAzureSdkTargetSource(Attributes attributes) {
String peerAddress = attributes.get(AZURE_SDK_PEER_ADDRESS);
String destination = attributes.get(AZURE_SDK_MESSAGE_BUS_DESTINATION);
return peerAddress + "/" + destination;
}
private static int getDefaultPortForDbSystem(String dbSystem) {
switch (dbSystem) {
case SemanticAttributes.DbSystemValues.MONGODB:
return 27017;
case SemanticAttributes.DbSystemValues.CASSANDRA:
return 9042;
case SemanticAttributes.DbSystemValues.REDIS:
return 6379;
case SemanticAttributes.DbSystemValues.MARIADB:
case SemanticAttributes.DbSystemValues.MYSQL:
return 3306;
case SemanticAttributes.DbSystemValues.MSSQL:
return 1433;
case SemanticAttributes.DbSystemValues.DB2:
return 50000;
case SemanticAttributes.DbSystemValues.ORACLE:
return 1521;
case SemanticAttributes.DbSystemValues.H2:
return 8082;
case SemanticAttributes.DbSystemValues.DERBY:
return 1527;
case SemanticAttributes.DbSystemValues.POSTGRESQL:
return 5432;
default:
return 0;
}
}
private void exportRequest(SpanData span, List<TelemetryItem> telemetryItems) {
TelemetryItem telemetry = new TelemetryItem();
RequestData data = new RequestData();
initTelemetry(telemetry, data, "Request", "RequestData");
data.setProperties(new HashMap<>());
Attributes attributes = span.getAttributes();
long startEpochNanos = span.getStartEpochNanos();
float samplingPercentage = 100;
data.setId(span.getSpanId());
setTime(telemetry, startEpochNanos);
setExtraAttributes(telemetry, data.getProperties(), attributes);
addLinks(data.getProperties(), span.getLinks());
String operationName = getOperationName(span);
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId());
telemetry
.getTags()
.put(
ContextTagKeys.AI_OPERATION_PARENT_ID.toString(),
span.getParentSpanContext().getSpanId());
data.setName(operationName);
data.setDuration(FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos));
data.setSuccess(getSuccess(span));
String httpUrl = getHttpUrlFromServerSpan(attributes);
if (httpUrl != null) {
data.setUrl(httpUrl);
}
Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
if (httpStatusCode == null) {
httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE);
}
if (httpStatusCode != null) {
data.setResponseCode(Long.toString(httpStatusCode));
} else {
data.setResponseCode("0");
}
String locationIp = attributes.get(SemanticAttributes.HTTP_CLIENT_IP);
if (locationIp == null) {
locationIp = attributes.get(SemanticAttributes.NET_PEER_IP);
}
if (locationIp != null) {
telemetry.getTags().put(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp);
}
data.setSource(getSource(attributes));
Long enqueuedTime = attributes.get(AZURE_SDK_ENQUEUED_TIME);
if (enqueuedTime != null) {
long timeSinceEnqueuedMillis =
Math.max(
0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime));
if (data.getMeasurements() == null) {
data.setMeasurements(new HashMap<>());
}
data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
}
Long timeSinceEnqueuedMillis = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS);
if (timeSinceEnqueuedMillis != null) {
if (data.getMeasurements() == null) {
data.setMeasurements(new HashMap<>());
}
data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
}
telemetryItems.add(telemetry);
exportEvents(span, operationName, telemetryItems);
}
private boolean getSuccess(SpanData span) {
switch (span.getStatus().getStatusCode()) {
case ERROR:
return false;
case OK:
return true;
case UNSET:
Long statusCode = span.getAttributes().get(SemanticAttributes.HTTP_STATUS_CODE);
return statusCode == null || statusCode < 400;
default:
return true;
}
}
@Nullable
private static String getHttpUrlFromServerSpan(Attributes attributes) {
String httpUrl = attributes.get(SemanticAttributes.HTTP_URL);
if (httpUrl != null) {
return httpUrl;
}
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
if (scheme == null) {
return null;
}
String host = attributes.get(SemanticAttributes.HTTP_HOST);
if (host == null) {
return null;
}
String target = attributes.get(SemanticAttributes.HTTP_TARGET);
if (target == null) {
return null;
}
return scheme + ":
}
private static String getSource(Attributes attributes) {
if (isAzureQueue(attributes)) {
return getAzureSdkTargetSource(attributes);
}
String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
if (messagingSystem != null) {
String source =
nullAwareConcat(
getTargetFromPeerAttributes(attributes, 0),
attributes.get(SemanticAttributes.MESSAGING_DESTINATION),
"/");
if (source != null) {
return source;
}
return messagingSystem;
}
return null;
}
private static boolean isAzureQueue(Attributes attributes) {
String azureNamespace = attributes.get(AZURE_NAMESPACE);
return "Microsoft.EventHub".equals(azureNamespace)
|| "Microsoft.ServiceBus".equals(azureNamespace);
}
private static String getOperationName(SpanData span) {
String spanName = span.getName();
String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
if (httpMethod != null && !httpMethod.isEmpty() && spanName.startsWith("/")) {
return httpMethod + " " + spanName;
}
return spanName;
}
private static String nullAwareConcat(String str1, String str2, String separator) {
if (str1 == null) {
return str2;
}
if (str2 == null) {
return str1;
}
return str1 + separator + str2;
}
private void exportEvents(SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) {
for (EventData event : span.getEvents()) {
if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null
|| event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) {
String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE);
if (stacktrace != null) {
trackException(stacktrace, span, operationName, telemetryItems);
}
return;
}
TelemetryItem telemetry = new TelemetryItem();
MessageData data = new MessageData();
initTelemetry(telemetry, data, "Message", "MessageData");
data.setProperties(new HashMap<>());
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getSpanId());
if (operationName != null) {
setOperationName(telemetry, operationName);
} else {
setOperationName(telemetry, span.getAttributes());
}
setTime(telemetry, event.getEpochNanos());
setExtraAttributes(telemetry, data.getProperties(), event.getAttributes());
data.setMessage(event.getName());
telemetryItems.add(telemetry);
}
}
private void trackException(String errorStack, SpanData span, @Nullable String operationName,
List<TelemetryItem> telemetryItems) {
TelemetryItem telemetry = new TelemetryItem();
TelemetryExceptionData data = new TelemetryExceptionData();
initTelemetry(telemetry, data, "Exception", "ExceptionData");
data.setProperties(new HashMap<>());
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getSpanId());
if (operationName != null) {
setOperationName(telemetry, operationName);
} else {
setOperationName(telemetry, span.getAttributes());
}
setTime(telemetry, span.getEndEpochNanos());
data.setExceptions(Exceptions.minimalParse(errorStack));
telemetryItems.add(telemetry);
}
private void initTelemetry(TelemetryItem telemetry, MonitorDomain data, String telemetryName,
String baseType) {
telemetry.setVersion(1);
telemetry.setName(telemetryName);
telemetry.setInstrumentationKey(instrumentationKey);
telemetry.setTags(new HashMap<>());
telemetry.getTags().put(ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
data.setVersion(2);
MonitorBase monitorBase = new MonitorBase();
telemetry.setData(monitorBase);
monitorBase.setBaseType(baseType);
monitorBase.setBaseData(data);
}
private static void setTime(TelemetryItem telemetry, long epochNanos) {
telemetry.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos));
}
private static void addLinks(Map<String, String> properties, List<LinkData> links) {
if (links.isEmpty()) {
return;
}
StringBuilder sb = new StringBuilder();
sb.append("[");
boolean first = true;
for (LinkData link : links) {
if (!first) {
sb.append(",");
}
sb.append("{\"operation_Id\":\"");
sb.append(link.getSpanContext().getTraceId());
sb.append("\",\"id\":\"");
sb.append(link.getSpanContext().getSpanId());
sb.append("\"}");
first = false;
}
sb.append("]");
properties.put("_MS.links", sb.toString());
}
private static void setExtraAttributes(TelemetryItem telemetry, Map<String, String> properties,
Attributes attributes) {
attributes.forEach((key, value) -> {
String stringKey = key.getKey();
if (stringKey.equals(AZURE_NAMESPACE.getKey())
|| stringKey.equals(AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey())
|| stringKey.equals(AZURE_SDK_ENQUEUED_TIME.getKey())) {
return;
}
if (stringKey.equals(KAFKA_RECORD_QUEUE_TIME_MS.getKey())
|| stringKey.equals(KAFKA_OFFSET.getKey())) {
return;
}
if (stringKey.equals(SemanticAttributes.ENDUSER_ID.getKey()) && value instanceof String) {
telemetry.getTags().put(ContextTagKeys.AI_USER_ID.toString(), (String) value);
return;
}
if (stringKey.equals(SemanticAttributes.HTTP_USER_AGENT.getKey())
&& value instanceof String) {
telemetry.getTags().put("ai.user.userAgent", (String) value);
return;
}
int index = stringKey.indexOf(".");
String prefix = index == -1 ? stringKey : stringKey.substring(0, index);
if (STANDARD_ATTRIBUTE_PREFIXES.contains(prefix)) {
return;
}
String val = convertToString(value, key.getType());
if (value != null) {
properties.put(key.getKey(), val);
}
});
}
@Nullable
private static String convertToString(Object value, AttributeType type) {
switch (type) {
case STRING:
case BOOLEAN:
case LONG:
case DOUBLE:
return String.valueOf(value);
case STRING_ARRAY:
case BOOLEAN_ARRAY:
case LONG_ARRAY:
case DOUBLE_ARRAY:
return join((List<?>) value);
default:
LOGGER.warning("unexpected attribute type: {}", type);
return null;
}
}
private static <T> String join(List<T> values) {
StringBuilder sb = new StringBuilder();
for (Object val : values) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append(val);
}
return sb.toString();
}
} | class AzureMonitorTraceExporter implements SpanExporter {
private static final Set<String> SQL_DB_SYSTEMS;
private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES;
private static final AttributeKey<String> AI_OPERATION_NAME_KEY =
AttributeKey.stringKey("applicationinsights.internal.operation_name");
private static final AttributeKey<String> AZURE_NAMESPACE =
AttributeKey.stringKey("az.namespace");
private static final AttributeKey<String> AZURE_SDK_PEER_ADDRESS =
AttributeKey.stringKey("peer.address");
private static final AttributeKey<String> AZURE_SDK_MESSAGE_BUS_DESTINATION =
AttributeKey.stringKey("message_bus.destination");
private static final AttributeKey<Long> AZURE_SDK_ENQUEUED_TIME =
AttributeKey.longKey("x-opt-enqueued-time");
private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS =
longKey("kafka.record.queue_time_ms");
private static final AttributeKey<Long> KAFKA_OFFSET = longKey("kafka.offset");
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorTraceExporter.class);
static {
Set<String> dbSystems = new HashSet<>();
dbSystems.add(SemanticAttributes.DbSystemValues.DB2);
dbSystems.add(SemanticAttributes.DbSystemValues.DERBY);
dbSystems.add(SemanticAttributes.DbSystemValues.MARIADB);
dbSystems.add(SemanticAttributes.DbSystemValues.MSSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.MYSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.ORACLE);
dbSystems.add(SemanticAttributes.DbSystemValues.POSTGRESQL);
dbSystems.add(SemanticAttributes.DbSystemValues.SQLITE);
dbSystems.add(SemanticAttributes.DbSystemValues.OTHER_SQL);
dbSystems.add(SemanticAttributes.DbSystemValues.HSQLDB);
dbSystems.add(SemanticAttributes.DbSystemValues.H2);
SQL_DB_SYSTEMS = Collections.unmodifiableSet(dbSystems);
Set<String> standardAttributesPrefix = new HashSet<>();
standardAttributesPrefix.add("http");
standardAttributesPrefix.add("db");
standardAttributesPrefix.add("message");
standardAttributesPrefix.add("messaging");
standardAttributesPrefix.add("rpc");
standardAttributesPrefix.add("enduser");
standardAttributesPrefix.add("net");
standardAttributesPrefix.add("peer");
standardAttributesPrefix.add("exception");
standardAttributesPrefix.add("thread");
standardAttributesPrefix.add("faas");
STANDARD_ATTRIBUTE_PREFIXES = Collections.unmodifiableSet(standardAttributesPrefix);
}
private final MonitorExporterAsyncClient client;
private final String instrumentationKey;
/**
* Creates an instance of exporter that is configured with given exporter client that sends telemetry events to
* Application Insights resource identified by the instrumentation key.
*
* @param client The client used to send data to Azure Monitor.
* @param instrumentationKey The instrumentation key of Application Insights resource.
*/
AzureMonitorTraceExporter(MonitorExporterAsyncClient client, String instrumentationKey) {
this.client = client;
this.instrumentationKey = instrumentationKey;
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode export(Collection<SpanData> spans) {
CompletableResultCode completableResultCode = new CompletableResultCode();
try {
List<TelemetryItem> telemetryItems = new ArrayList<>();
for (SpanData span : spans) {
LOGGER.verbose("exporting span: {}", span);
export(span, telemetryItems);
}
client.export(telemetryItems)
.subscriberContext(Context.of(Tracer.DISABLE_TRACING_KEY, true))
.subscribe(ignored -> { }, error -> completableResultCode.fail(), completableResultCode::succeed);
return completableResultCode;
} catch (Throwable t) {
LOGGER.error(t.getMessage(), t);
return completableResultCode.fail();
}
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode flush() {
return CompletableResultCode.ofSuccess();
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode shutdown() {
return CompletableResultCode.ofSuccess();
} | class AzureMonitorTraceExporter implements SpanExporter {
private static final Set<String> SQL_DB_SYSTEMS;
private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES;
private static final AttributeKey<String> AI_OPERATION_NAME_KEY =
AttributeKey.stringKey("applicationinsights.internal.operation_name");
private static final AttributeKey<String> AZURE_NAMESPACE =
AttributeKey.stringKey("az.namespace");
private static final AttributeKey<String> AZURE_SDK_PEER_ADDRESS =
AttributeKey.stringKey("peer.address");
private static final AttributeKey<String> AZURE_SDK_MESSAGE_BUS_DESTINATION =
AttributeKey.stringKey("message_bus.destination");
private static final AttributeKey<Long> AZURE_SDK_ENQUEUED_TIME =
AttributeKey.longKey("x-opt-enqueued-time");
private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS =
longKey("kafka.record.queue_time_ms");
private static final AttributeKey<Long> KAFKA_OFFSET = longKey("kafka.offset");
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorTraceExporter.class);
static {
Set<String> dbSystems = new HashSet<>();
dbSystems.add(SemanticAttributes.DbSystemValues.DB2);
dbSystems.add(SemanticAttributes.DbSystemValues.DERBY);
dbSystems.add(SemanticAttributes.DbSystemValues.MARIADB);
dbSystems.add(SemanticAttributes.DbSystemValues.MSSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.MYSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.ORACLE);
dbSystems.add(SemanticAttributes.DbSystemValues.POSTGRESQL);
dbSystems.add(SemanticAttributes.DbSystemValues.SQLITE);
dbSystems.add(SemanticAttributes.DbSystemValues.OTHER_SQL);
dbSystems.add(SemanticAttributes.DbSystemValues.HSQLDB);
dbSystems.add(SemanticAttributes.DbSystemValues.H2);
SQL_DB_SYSTEMS = Collections.unmodifiableSet(dbSystems);
Set<String> standardAttributesPrefix = new HashSet<>();
standardAttributesPrefix.add("http");
standardAttributesPrefix.add("db");
standardAttributesPrefix.add("message");
standardAttributesPrefix.add("messaging");
standardAttributesPrefix.add("rpc");
standardAttributesPrefix.add("enduser");
standardAttributesPrefix.add("net");
standardAttributesPrefix.add("peer");
standardAttributesPrefix.add("exception");
standardAttributesPrefix.add("thread");
standardAttributesPrefix.add("faas");
STANDARD_ATTRIBUTE_PREFIXES = Collections.unmodifiableSet(standardAttributesPrefix);
}
private final MonitorExporterAsyncClient client;
private final String instrumentationKey;
/**
* Creates an instance of exporter that is configured with given exporter client that sends telemetry events to
* Application Insights resource identified by the instrumentation key.
*
* @param client The client used to send data to Azure Monitor.
* @param instrumentationKey The instrumentation key of Application Insights resource.
*/
AzureMonitorTraceExporter(MonitorExporterAsyncClient client, String instrumentationKey) {
this.client = client;
this.instrumentationKey = instrumentationKey;
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode export(Collection<SpanData> spans) {
CompletableResultCode completableResultCode = new CompletableResultCode();
try {
List<TelemetryItem> telemetryItems = new ArrayList<>();
for (SpanData span : spans) {
LOGGER.verbose("exporting span: {}", span);
export(span, telemetryItems);
}
client.export(telemetryItems)
.subscriberContext(Context.of(Tracer.DISABLE_TRACING_KEY, true))
.subscribe(ignored -> { }, error -> completableResultCode.fail(), completableResultCode::succeed);
return completableResultCode;
} catch (Throwable t) {
LOGGER.error(t.getMessage(), t);
return completableResultCode.fail();
}
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode flush() {
return CompletableResultCode.ofSuccess();
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode shutdown() {
return CompletableResultCode.ofSuccess();
} |
one is sent by the Azure SDK instrumentation, the other is sent by Kafka instrumentation, so there's no overlap | private void export(SpanData span, List<TelemetryItem> telemetryItems) {
SpanKind kind = span.getKind();
String instrumentationName = span.getInstrumentationLibraryInfo().getName();
if (kind == SpanKind.INTERNAL) {
if (instrumentationName.startsWith("io.opentelemetry.spring-scheduling-")
&& !span.getParentSpanContext().isValid()) {
exportRequest(span, telemetryItems);
} else {
exportRemoteDependency(span, true, telemetryItems);
}
} else if (kind == SpanKind.CLIENT || kind == SpanKind.PRODUCER) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.CONSUMER
&& "receive".equals(span.getAttributes().get(SemanticAttributes.MESSAGING_OPERATION))) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.SERVER || kind == SpanKind.CONSUMER) {
exportRequest(span, telemetryItems);
} else {
throw LOGGER.logExceptionAsError(new UnsupportedOperationException(kind.name()));
}
}
private void exportRemoteDependency(SpanData span, boolean inProc,
List<TelemetryItem> telemetryItems) {
TelemetryItem telemetry = new TelemetryItem();
RemoteDependencyData data = new RemoteDependencyData();
initTelemetry(telemetry, data, "RemoteDependency", "RemoteDependencyData");
data.setProperties(new HashMap<>());
float samplingPercentage = 100;
setOperationTags(telemetry, span);
setTime(telemetry, span.getStartEpochNanos());
setExtraAttributes(telemetry, data.getProperties(), span.getAttributes());
addLinks(data.getProperties(), span.getLinks());
data.setId(span.getSpanId());
data.setName(getDependencyName(span));
data.setDuration(
FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos()));
data.setSuccess(getSuccess(span));
if (inProc) {
data.setType("InProc");
} else {
applySemanticConventions(span, data);
}
telemetryItems.add(telemetry);
exportEvents(span, null, telemetryItems);
}
private static final Set<String> DEFAULT_HTTP_SPAN_NAMES =
new HashSet<>(
Arrays.asList(
"HTTP OPTIONS",
"HTTP GET",
"HTTP HEAD",
"HTTP POST",
"HTTP PUT",
"HTTP DELETE",
"HTTP TRACE",
"HTTP CONNECT",
"HTTP PATCH"));
private static String getDependencyName(SpanData span) {
String name = span.getName();
String method = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
if (method == null) {
return name;
}
if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) {
return name;
}
String url = span.getAttributes().get(SemanticAttributes.HTTP_URL);
if (url == null) {
return name;
}
String path = UrlParser.getPathFromUrl(url);
if (path == null) {
return name;
}
return path.isEmpty() ? method + " /" : method + " " + path;
}
private static void applySemanticConventions(SpanData span, RemoteDependencyData remoteDependencyData) {
Attributes attributes = span.getAttributes();
String httpMethod = attributes.get(SemanticAttributes.HTTP_METHOD);
if (httpMethod != null) {
applyHttpClientSpan(attributes, remoteDependencyData);
return;
}
String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM);
if (rpcSystem != null) {
applyRpcClientSpan(attributes, remoteDependencyData, rpcSystem);
return;
}
String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM);
if (dbSystem != null) {
applyDatabaseClientSpan(attributes, remoteDependencyData, dbSystem);
return;
}
String azureNamespace = attributes.get(AZURE_NAMESPACE);
if ("Microsoft.EventHub".equals(azureNamespace)) {
applyEventHubsSpan(attributes, remoteDependencyData);
return;
}
if ("Microsoft.ServiceBus".equals(azureNamespace)) {
applyServiceBusSpan(attributes, remoteDependencyData);
return;
}
String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
if (messagingSystem != null) {
applyMessagingClientSpan(attributes, remoteDependencyData, messagingSystem, span.getKind());
return;
}
String target = getTargetFromPeerAttributes(attributes, Integer.MAX_VALUE);
if (target != null) {
remoteDependencyData.setTarget(target);
return;
}
remoteDependencyData.setType("InProc");
}
private static void setOperationTags(TelemetryItem telemetry, SpanData span) {
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getParentSpanContext().getSpanId());
setOperationName(telemetry, span.getAttributes());
}
private static void setOperationId(TelemetryItem telemetry, String traceId) {
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), traceId);
}
private static void setOperationParentId(TelemetryItem telemetry, String parentSpanId) {
if (SpanId.isValid(parentSpanId)) {
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId);
}
}
private static void setOperationName(TelemetryItem telemetry, Attributes attributes) {
String operationName = attributes.get(AI_OPERATION_NAME_KEY);
if (operationName != null) {
setOperationName(telemetry, operationName);
}
}
private static void setOperationName(TelemetryItem telemetry, String operationName) {
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
}
private static void applyHttpClientSpan(Attributes attributes, RemoteDependencyData telemetry) {
String target = getTargetForHttpClientSpan(attributes);
telemetry.setType("Http");
telemetry.setTarget(target);
Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
if (httpStatusCode != null) {
telemetry.setResultCode(Long.toString(httpStatusCode));
}
String url = attributes.get(SemanticAttributes.HTTP_URL);
telemetry.setData(url);
}
private static String getTargetForHttpClientSpan(Attributes attributes) {
String target = getTargetFromPeerService(attributes);
if (target != null) {
return target;
}
target = attributes.get(SemanticAttributes.HTTP_HOST);
if (target != null) {
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
if ("http".equals(scheme)) {
if (target.endsWith(":80")) {
target = target.substring(0, target.length() - 3);
}
} else if ("https".equals(scheme)) {
if (target.endsWith(":443")) {
target = target.substring(0, target.length() - 4);
}
}
return target;
}
String url = attributes.get(SemanticAttributes.HTTP_URL);
if (url != null) {
target = UrlParser.getTargetFromUrl(url);
if (target != null) {
return target;
}
}
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
int defaultPort;
if ("http".equals(scheme)) {
defaultPort = 80;
} else if ("https".equals(scheme)) {
defaultPort = 443;
} else {
defaultPort = 0;
}
target = getTargetFromNetAttributes(attributes, defaultPort);
if (target != null) {
return target;
}
return "Http";
}
@Nullable
private static String getTargetFromPeerAttributes(Attributes attributes, int defaultPort) {
String target = getTargetFromPeerService(attributes);
if (target != null) {
return target;
}
return getTargetFromNetAttributes(attributes, defaultPort);
}
@Nullable
private static String getTargetFromPeerService(Attributes attributes) {
return attributes.get(SemanticAttributes.PEER_SERVICE);
}
@Nullable
private static String getTargetFromNetAttributes(Attributes attributes, int defaultPort) {
String target = getHostFromNetAttributes(attributes);
if (target == null) {
return null;
}
Long port = attributes.get(SemanticAttributes.NET_PEER_PORT);
if (port != null && port != defaultPort) {
return target + ":" + port;
}
return target;
}
@Nullable
private static String getHostFromNetAttributes(Attributes attributes) {
String host = attributes.get(SemanticAttributes.NET_PEER_NAME);
if (host != null) {
return host;
}
return attributes.get(SemanticAttributes.NET_PEER_IP);
}
private static void applyRpcClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String rpcSystem) {
telemetry.setType(rpcSystem);
String target = getTargetFromPeerAttributes(attributes, 0);
if (target == null) {
target = rpcSystem;
}
telemetry.setTarget(target);
}
private static void applyDatabaseClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String dbSystem) {
String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT);
String type;
if (SQL_DB_SYSTEMS.contains(dbSystem)) {
if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) {
type = "mysql";
} else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) {
type = "postgresql";
} else {
type = "SQL";
}
} else {
type = dbSystem;
}
telemetry.setType(type);
telemetry.setData(dbStatement);
String target =
nullAwareConcat(
getTargetFromPeerAttributes(attributes, getDefaultPortForDbSystem(dbSystem)),
attributes.get(SemanticAttributes.DB_NAME),
" | ");
if (target == null) {
target = dbSystem;
}
telemetry.setTarget(target);
}
private static void applyMessagingClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String messagingSystem, SpanKind spanKind) {
if (spanKind == SpanKind.PRODUCER) {
telemetry.setType("Queue Message | " + messagingSystem);
} else {
telemetry.setType(messagingSystem);
}
String destination = attributes.get(SemanticAttributes.MESSAGING_DESTINATION);
if (destination != null) {
telemetry.setTarget(destination);
} else {
telemetry.setTarget(messagingSystem);
}
}
private static void applyEventHubsSpan(Attributes attributes, RemoteDependencyData telemetry) {
telemetry.setType("Microsoft.EventHub");
telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
private static void applyServiceBusSpan(Attributes attributes, RemoteDependencyData telemetry) {
telemetry.setType("AZURE SERVICE BUS");
telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
private static String getAzureSdkTargetSource(Attributes attributes) {
String peerAddress = attributes.get(AZURE_SDK_PEER_ADDRESS);
String destination = attributes.get(AZURE_SDK_MESSAGE_BUS_DESTINATION);
return peerAddress + "/" + destination;
}
private static int getDefaultPortForDbSystem(String dbSystem) {
switch (dbSystem) {
case SemanticAttributes.DbSystemValues.MONGODB:
return 27017;
case SemanticAttributes.DbSystemValues.CASSANDRA:
return 9042;
case SemanticAttributes.DbSystemValues.REDIS:
return 6379;
case SemanticAttributes.DbSystemValues.MARIADB:
case SemanticAttributes.DbSystemValues.MYSQL:
return 3306;
case SemanticAttributes.DbSystemValues.MSSQL:
return 1433;
case SemanticAttributes.DbSystemValues.DB2:
return 50000;
case SemanticAttributes.DbSystemValues.ORACLE:
return 1521;
case SemanticAttributes.DbSystemValues.H2:
return 8082;
case SemanticAttributes.DbSystemValues.DERBY:
return 1527;
case SemanticAttributes.DbSystemValues.POSTGRESQL:
return 5432;
default:
return 0;
}
}
/**
 * Converts a SERVER/CONSUMER span into an Application Insights Request telemetry item
 * and appends it (plus any span events) to {@code telemetryItems}.
 */
private void exportRequest(SpanData span, List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    RequestData data = new RequestData();
    initTelemetry(telemetry, data, "Request", "RequestData");
    data.setProperties(new HashMap<>());
    Attributes attributes = span.getAttributes();
    long startEpochNanos = span.getStartEpochNanos();
    // FIX: removed the unused local "float samplingPercentage = 100;".
    data.setId(span.getSpanId());
    setTime(telemetry, startEpochNanos);
    setExtraAttributes(telemetry, data.getProperties(), attributes);
    addLinks(data.getProperties(), span.getLinks());
    String operationName = getOperationName(span);
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId());
    telemetry
        .getTags()
        .put(
            ContextTagKeys.AI_OPERATION_PARENT_ID.toString(),
            span.getParentSpanContext().getSpanId());
    data.setName(operationName);
    data.setDuration(FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos));
    data.setSuccess(getSuccess(span));
    String httpUrl = getHttpUrlFromServerSpan(attributes);
    if (httpUrl != null) {
        data.setUrl(httpUrl);
    }
    // Fall back to the gRPC status code when no HTTP status code was recorded.
    Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
    if (httpStatusCode == null) {
        httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE);
    }
    if (httpStatusCode != null) {
        data.setResponseCode(Long.toString(httpStatusCode));
    } else {
        data.setResponseCode("0");
    }
    String locationIp = attributes.get(SemanticAttributes.HTTP_CLIENT_IP);
    if (locationIp == null) {
        locationIp = attributes.get(SemanticAttributes.NET_PEER_IP);
    }
    if (locationIp != null) {
        telemetry.getTags().put(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp);
    }
    data.setSource(getSource(attributes));
    // Azure SDK messaging: queue-time measurement, clamped to >= 0 for clock skew.
    // NOTE(review): enqueuedTime is converted with SECONDS.toMillis here, implying the
    // attribute is epoch seconds — TODO confirm the unit against the producing SDK.
    Long enqueuedTime = attributes.get(AZURE_SDK_ENQUEUED_TIME);
    if (enqueuedTime != null) {
        long timeSinceEnqueuedMillis =
            Math.max(
                0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime));
        if (data.getMeasurements() == null) {
            data.setMeasurements(new HashMap<>());
        }
        data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
    }
    // Kafka reports queue time directly in milliseconds; if both attributes are present
    // this overwrites the Azure-derived value (same measurement key).
    Long timeSinceEnqueuedMillis = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS);
    if (timeSinceEnqueuedMillis != null) {
        if (data.getMeasurements() == null) {
            data.setMeasurements(new HashMap<>());
        }
        data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
    }
    telemetryItems.add(telemetry);
    exportEvents(span, operationName, telemetryItems);
}
/** Maps the span status onto the Application Insights success flag. */
private boolean getSuccess(SpanData span) {
    switch (span.getStatus().getStatusCode()) {
        case ERROR:
            return false;
        case UNSET:
            // No explicit status: infer from the HTTP status code; absent or < 400 is success.
            Long statusCode = span.getAttributes().get(SemanticAttributes.HTTP_STATUS_CODE);
            return statusCode == null || statusCode < 400;
        case OK:
        default:
            return true;
    }
}
@Nullable
private static String getHttpUrlFromServerSpan(Attributes attributes) {
String httpUrl = attributes.get(SemanticAttributes.HTTP_URL);
if (httpUrl != null) {
return httpUrl;
}
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
if (scheme == null) {
return null;
}
String host = attributes.get(SemanticAttributes.HTTP_HOST);
if (host == null) {
return null;
}
String target = attributes.get(SemanticAttributes.HTTP_TARGET);
if (target == null) {
return null;
}
return scheme + ":
}
/**
 * Derives the request "source" (the caller) for messaging consumers;
 * null when the span carries no messaging attributes at all.
 */
private static String getSource(Attributes attributes) {
    // Azure Event Hubs / Service Bus spans carry their own peer-address attributes.
    if (isAzureQueue(attributes)) {
        return getAzureSdkTargetSource(attributes);
    }
    String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
    if (messagingSystem == null) {
        return null;
    }
    String source =
        nullAwareConcat(
            getTargetFromPeerAttributes(attributes, 0),
            attributes.get(SemanticAttributes.MESSAGING_DESTINATION),
            "/");
    return source != null ? source : messagingSystem;
}
/** True when the span was emitted by the Azure Event Hubs or Service Bus SDK. */
private static boolean isAzureQueue(Attributes attributes) {
    String azureNamespace = attributes.get(AZURE_NAMESPACE);
    if (azureNamespace == null) {
        return false;
    }
    return azureNamespace.equals("Microsoft.EventHub")
        || azureNamespace.equals("Microsoft.ServiceBus");
}
/**
 * Operation name for a request: "METHOD /path" when the span is named by an HTTP route,
 * otherwise the raw span name.
 */
private static String getOperationName(SpanData span) {
    String spanName = span.getName();
    String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
    boolean routeNamed = httpMethod != null && !httpMethod.isEmpty() && spanName.startsWith("/");
    return routeNamed ? httpMethod + " " + spanName : spanName;
}
/**
 * Joins two strings with the separator. If either side is null the other is returned
 * as-is (which may itself be null when both are).
 */
private static String nullAwareConcat(String str1, String str2, String separator) {
    if (str1 == null || str2 == null) {
        return str1 == null ? str2 : str1;
    }
    return str1 + separator + str2;
}
// Emits the span's events as Message telemetry. Exception events are converted to
// Exception telemetry instead, and the loop stops at the first exception event.
private void exportEvents(SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) {
for (EventData event : span.getEvents()) {
if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null
|| event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) {
String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE);
if (stacktrace != null) {
trackException(stacktrace, span, operationName, telemetryItems);
}
// NOTE(review): this returns (not continues), so any events after the first
// exception event are dropped — confirm this is intentional.
return;
}
TelemetryItem telemetry = new TelemetryItem();
MessageData data = new MessageData();
initTelemetry(telemetry, data, "Message", "MessageData");
data.setProperties(new HashMap<>());
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getSpanId());
// Request spans pass their computed operation name; dependency spans pass null and
// fall back to the agent-propagated attribute.
if (operationName != null) {
setOperationName(telemetry, operationName);
} else {
setOperationName(telemetry, span.getAttributes());
}
setTime(telemetry, event.getEpochNanos());
setExtraAttributes(telemetry, data.getProperties(), event.getAttributes());
data.setMessage(event.getName());
telemetryItems.add(telemetry);
}
}
// Converts a stack trace captured on a span event into Exception telemetry tied to the span.
private void trackException(String errorStack, SpanData span, @Nullable String operationName,
List<TelemetryItem> telemetryItems) {
TelemetryItem telemetry = new TelemetryItem();
TelemetryExceptionData data = new TelemetryExceptionData();
initTelemetry(telemetry, data, "Exception", "ExceptionData");
data.setProperties(new HashMap<>());
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getSpanId());
// Prefer the caller-supplied operation name; otherwise use the propagated attribute.
if (operationName != null) {
setOperationName(telemetry, operationName);
} else {
setOperationName(telemetry, span.getAttributes());
}
// The exception is stamped with the span's end time.
setTime(telemetry, span.getEndEpochNanos());
data.setExceptions(Exceptions.minimalParse(errorStack));
telemetryItems.add(telemetry);
}
/**
 * Populates the envelope fields shared by every telemetry type: version, name,
 * instrumentation key, the SDK-version tag, and wires {@code data} in as the
 * base data of the envelope under the given base type.
 */
private void initTelemetry(TelemetryItem telemetry, MonitorDomain data, String telemetryName,
    String baseType) {
    MonitorBase monitorBase = new MonitorBase();
    monitorBase.setBaseType(baseType);
    monitorBase.setBaseData(data);
    data.setVersion(2);
    telemetry.setVersion(1);
    telemetry.setName(telemetryName);
    telemetry.setInstrumentationKey(instrumentationKey);
    telemetry.setTags(new HashMap<>());
    telemetry.getTags().put(ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
    telemetry.setData(monitorBase);
}
// Stamps the envelope with the given wall-clock time (epoch nanos -> formatted offset time).
private static void setTime(TelemetryItem telemetry, long epochNanos) {
telemetry.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos));
}
/**
 * Serializes span links as a compact JSON array into the "_MS.links" property
 * (the format Application Insights expects). Writes nothing when there are no links.
 */
private static void addLinks(Map<String, String> properties, List<LinkData> links) {
    if (links.isEmpty()) {
        return;
    }
    StringBuilder json = new StringBuilder("[");
    String delimiter = "";
    for (LinkData link : links) {
        json.append(delimiter)
            .append("{\"operation_Id\":\"")
            .append(link.getSpanContext().getTraceId())
            .append("\",\"id\":\"")
            .append(link.getSpanContext().getSpanId())
            .append("\"}");
        delimiter = ",";
    }
    json.append("]");
    properties.put("_MS.links", json.toString());
}
/**
 * Copies non-standard span attributes into the telemetry's custom properties.
 * Azure SDK and Kafka internals are skipped (they map to dedicated fields/measurements),
 * enduser.id and http.user_agent map to envelope tags, and any attribute whose prefix is
 * covered by a standard telemetry field is dropped.
 */
private static void setExtraAttributes(TelemetryItem telemetry, Map<String, String> properties,
    Attributes attributes) {
    attributes.forEach((key, value) -> {
        String stringKey = key.getKey();
        // Azure SDK internal attributes are mapped elsewhere; don't duplicate them here.
        if (stringKey.equals(AZURE_NAMESPACE.getKey())
            || stringKey.equals(AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey())
            || stringKey.equals(AZURE_SDK_ENQUEUED_TIME.getKey())) {
            return;
        }
        // Kafka queue-time/offset are captured as measurements, not custom properties.
        if (stringKey.equals(KAFKA_RECORD_QUEUE_TIME_MS.getKey())
            || stringKey.equals(KAFKA_OFFSET.getKey())) {
            return;
        }
        if (stringKey.equals(SemanticAttributes.ENDUSER_ID.getKey()) && value instanceof String) {
            telemetry.getTags().put(ContextTagKeys.AI_USER_ID.toString(), (String) value);
            return;
        }
        if (stringKey.equals(SemanticAttributes.HTTP_USER_AGENT.getKey())
            && value instanceof String) {
            telemetry.getTags().put("ai.user.userAgent", (String) value);
            return;
        }
        int index = stringKey.indexOf(".");
        String prefix = index == -1 ? stringKey : stringKey.substring(0, index);
        if (STANDARD_ATTRIBUTE_PREFIXES.contains(prefix)) {
            return;
        }
        String val = convertToString(value, key.getType());
        // FIX: guard on the converted value ("val"), not the raw "value" — convertToString
        // can return null for unexpected attribute types, and the original check allowed a
        // null to be stored in the properties map.
        if (val != null) {
            properties.put(key.getKey(), val);
        }
    });
}
/**
 * Renders an attribute value as a string: scalars via String.valueOf, arrays as a
 * comma-joined list, and null (with a warning) for any unknown attribute type.
 */
@Nullable
private static String convertToString(Object value, AttributeType type) {
    switch (type) {
        case STRING_ARRAY:
        case BOOLEAN_ARRAY:
        case LONG_ARRAY:
        case DOUBLE_ARRAY:
            return join((List<?>) value);
        case STRING:
        case BOOLEAN:
        case LONG:
        case DOUBLE:
            return String.valueOf(value);
        default:
            LOGGER.warning("unexpected attribute type: {}", type);
            return null;
    }
}
/** Comma-joins the list elements ("a, b, c"); empty string for an empty list. */
private static <T> String join(List<T> values) {
    StringBuilder out = new StringBuilder();
    String separator = "";
    for (Object item : values) {
        out.append(separator).append(item);
        separator = ", ";
    }
    return out.toString();
}
} | data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); | private void export(SpanData span, List<TelemetryItem> telemetryItems) {
SpanKind kind = span.getKind();
String instrumentationName = span.getInstrumentationLibraryInfo().getName();
if (kind == SpanKind.INTERNAL) {
if (instrumentationName.startsWith("io.opentelemetry.spring-scheduling-")
&& !span.getParentSpanContext().isValid()) {
exportRequest(span, telemetryItems);
} else {
exportRemoteDependency(span, true, telemetryItems);
}
} else if (kind == SpanKind.CLIENT || kind == SpanKind.PRODUCER) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.CONSUMER
&& "receive".equals(span.getAttributes().get(SemanticAttributes.MESSAGING_OPERATION))) {
exportRemoteDependency(span, false, telemetryItems);
} else if (kind == SpanKind.SERVER || kind == SpanKind.CONSUMER) {
exportRequest(span, telemetryItems);
} else {
throw LOGGER.logExceptionAsError(new UnsupportedOperationException(kind.name()));
}
}
/**
 * Converts a CLIENT/PRODUCER/INTERNAL span into a RemoteDependency telemetry item and
 * appends it (plus any span events) to {@code telemetryItems}.
 *
 * @param inProc true for in-process (INTERNAL) spans, which get the fixed type "InProc"
 *               instead of semantic-convention-based type/target mapping.
 */
private void exportRemoteDependency(SpanData span, boolean inProc,
    List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    RemoteDependencyData data = new RemoteDependencyData();
    initTelemetry(telemetry, data, "RemoteDependency", "RemoteDependencyData");
    data.setProperties(new HashMap<>());
    // FIX: removed the unused local "float samplingPercentage = 100;".
    setOperationTags(telemetry, span);
    setTime(telemetry, span.getStartEpochNanos());
    setExtraAttributes(telemetry, data.getProperties(), span.getAttributes());
    addLinks(data.getProperties(), span.getLinks());
    data.setId(span.getSpanId());
    data.setName(getDependencyName(span));
    data.setDuration(
        FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos()));
    data.setSuccess(getSuccess(span));
    if (inProc) {
        data.setType("InProc");
    } else {
        applySemanticConventions(span, data);
    }
    telemetryItems.add(telemetry);
    exportEvents(span, null, telemetryItems);
}
// Span names produced by HTTP instrumentation when no route is known; only these generic
// names are rewritten to "METHOD /path" below — custom names are kept verbatim.
private static final Set<String> DEFAULT_HTTP_SPAN_NAMES =
new HashSet<>(
Arrays.asList(
"HTTP OPTIONS",
"HTTP GET",
"HTTP HEAD",
"HTTP POST",
"HTTP PUT",
"HTTP DELETE",
"HTTP TRACE",
"HTTP CONNECT",
"HTTP PATCH"));
// Dependency display name: for HTTP spans with a default name, derive "METHOD /path"
// from http.url; in every other case keep the span's own name.
private static String getDependencyName(SpanData span) {
String name = span.getName();
String method = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
if (method == null) {
return name;
}
if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) {
return name;
}
String url = span.getAttributes().get(SemanticAttributes.HTTP_URL);
if (url == null) {
return name;
}
String path = UrlParser.getPathFromUrl(url);
if (path == null) {
return name;
}
return path.isEmpty() ? method + " /" : method + " " + path;
}
// Chooses the dependency type/target by inspecting attributes in priority order:
// HTTP, RPC, database, Azure SDK (Event Hubs / Service Bus), generic messaging,
// then bare peer attributes; with no signal at all the span is treated as in-process.
private static void applySemanticConventions(SpanData span, RemoteDependencyData remoteDependencyData) {
Attributes attributes = span.getAttributes();
String httpMethod = attributes.get(SemanticAttributes.HTTP_METHOD);
if (httpMethod != null) {
applyHttpClientSpan(attributes, remoteDependencyData);
return;
}
String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM);
if (rpcSystem != null) {
applyRpcClientSpan(attributes, remoteDependencyData, rpcSystem);
return;
}
String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM);
if (dbSystem != null) {
applyDatabaseClientSpan(attributes, remoteDependencyData, dbSystem);
return;
}
// Azure SDK spans are recognized by az.namespace before generic messaging attributes.
String azureNamespace = attributes.get(AZURE_NAMESPACE);
if ("Microsoft.EventHub".equals(azureNamespace)) {
applyEventHubsSpan(attributes, remoteDependencyData);
return;
}
if ("Microsoft.ServiceBus".equals(azureNamespace)) {
applyServiceBusSpan(attributes, remoteDependencyData);
return;
}
String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
if (messagingSystem != null) {
applyMessagingClientSpan(attributes, remoteDependencyData, messagingSystem, span.getKind());
return;
}
// MAX_VALUE default port: never matches, so any explicit net.peer.port is kept.
String target = getTargetFromPeerAttributes(attributes, Integer.MAX_VALUE);
if (target != null) {
remoteDependencyData.setTarget(target);
return;
}
remoteDependencyData.setType("InProc");
}
// Copies trace id, parent span id and operation name from the span onto the envelope tags.
private static void setOperationTags(TelemetryItem telemetry, SpanData span) {
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getParentSpanContext().getSpanId());
setOperationName(telemetry, span.getAttributes());
}
// Tags the envelope with the trace id as the Application Insights operation id.
private static void setOperationId(TelemetryItem telemetry, String traceId) {
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), traceId);
}
// Tags the parent span id, but only when it is a valid span id — root spans have none.
private static void setOperationParentId(TelemetryItem telemetry, String parentSpanId) {
if (SpanId.isValid(parentSpanId)) {
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId);
}
}
// Tags the operation name when the agent propagated one via the internal attribute;
// otherwise leaves the tag unset.
private static void setOperationName(TelemetryItem telemetry, Attributes attributes) {
String operationName = attributes.get(AI_OPERATION_NAME_KEY);
if (operationName != null) {
setOperationName(telemetry, operationName);
}
}
// Tags the given operation name unconditionally.
private static void setOperationName(TelemetryItem telemetry, String operationName) {
telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
}
/** Fills HTTP-client dependency fields: type "Http", target host, result code, URL as data. */
private static void applyHttpClientSpan(Attributes attributes, RemoteDependencyData telemetry) {
    telemetry.setType("Http");
    telemetry.setTarget(getTargetForHttpClientSpan(attributes));
    Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
    if (httpStatusCode != null) {
        telemetry.setResultCode(Long.toString(httpStatusCode));
    }
    telemetry.setData(attributes.get(SemanticAttributes.HTTP_URL));
}
// Target resolution for HTTP client dependencies, in priority order:
// 1. explicit peer.service; 2. http.host (with the scheme's default port stripped);
// 3. host parsed from http.url; 4. net.* attributes; 5. the literal "Http".
private static String getTargetForHttpClientSpan(Attributes attributes) {
String target = getTargetFromPeerService(attributes);
if (target != null) {
return target;
}
target = attributes.get(SemanticAttributes.HTTP_HOST);
if (target != null) {
// Strip ":80"/":443" when it is the default port for the recorded scheme.
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
if ("http".equals(scheme)) {
if (target.endsWith(":80")) {
target = target.substring(0, target.length() - 3);
}
} else if ("https".equals(scheme)) {
if (target.endsWith(":443")) {
target = target.substring(0, target.length() - 4);
}
}
return target;
}
String url = attributes.get(SemanticAttributes.HTTP_URL);
if (url != null) {
target = UrlParser.getTargetFromUrl(url);
if (target != null) {
return target;
}
}
// defaultPort suppresses the port in the net.*-derived target when it matches the scheme.
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
int defaultPort;
if ("http".equals(scheme)) {
defaultPort = 80;
} else if ("https".equals(scheme)) {
defaultPort = 443;
} else {
defaultPort = 0;
}
target = getTargetFromNetAttributes(attributes, defaultPort);
if (target != null) {
return target;
}
return "Http";
}
/** Target from peer attributes: explicit peer.service wins, else net.* attributes. */
@Nullable
private static String getTargetFromPeerAttributes(Attributes attributes, int defaultPort) {
    String peerService = getTargetFromPeerService(attributes);
    return peerService != null
        ? peerService
        : getTargetFromNetAttributes(attributes, defaultPort);
}
// peer.service is a user-configured logical service name; it always wins when present.
@Nullable
private static String getTargetFromPeerService(Attributes attributes) {
return attributes.get(SemanticAttributes.PEER_SERVICE);
}
/** "host[:port]" from net.* attributes; the port is omitted when it equals defaultPort. */
@Nullable
private static String getTargetFromNetAttributes(Attributes attributes, int defaultPort) {
    String host = getHostFromNetAttributes(attributes);
    if (host == null) {
        return null;
    }
    Long port = attributes.get(SemanticAttributes.NET_PEER_PORT);
    boolean includePort = port != null && port != defaultPort;
    return includePort ? host + ":" + port : host;
}
/** Prefers net.peer.name, falling back to net.peer.ip (both may be absent). */
@Nullable
private static String getHostFromNetAttributes(Attributes attributes) {
    String peerName = attributes.get(SemanticAttributes.NET_PEER_NAME);
    return peerName != null ? peerName : attributes.get(SemanticAttributes.NET_PEER_IP);
}
/** RPC dependency: rpc.system is the type and also the target fallback. */
private static void applyRpcClientSpan(Attributes attributes, RemoteDependencyData telemetry,
    String rpcSystem) {
    telemetry.setType(rpcSystem);
    String target = getTargetFromPeerAttributes(attributes, 0);
    telemetry.setTarget(target != null ? target : rpcSystem);
}
// Database dependency: SQL-flavored systems map to type "SQL" (mysql/postgresql keep
// their own names for portal icons); db.statement becomes the data; the target is
// "host[:non-default-port] | dbname", falling back to the system name.
private static void applyDatabaseClientSpan(Attributes attributes, RemoteDependencyData telemetry,
String dbSystem) {
String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT);
String type;
if (SQL_DB_SYSTEMS.contains(dbSystem)) {
if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) {
type = "mysql";
} else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) {
type = "postgresql";
} else {
type = "SQL";
}
} else {
type = dbSystem;
}
telemetry.setType(type);
telemetry.setData(dbStatement);
String target =
nullAwareConcat(
getTargetFromPeerAttributes(attributes, getDefaultPortForDbSystem(dbSystem)),
attributes.get(SemanticAttributes.DB_NAME),
" | ");
if (target == null) {
target = dbSystem;
}
telemetry.setTarget(target);
}
/** Messaging dependency: producers get a distinct type; target prefers the destination. */
private static void applyMessagingClientSpan(Attributes attributes, RemoteDependencyData telemetry,
    String messagingSystem, SpanKind spanKind) {
    telemetry.setType(
        spanKind == SpanKind.PRODUCER ? "Queue Message | " + messagingSystem : messagingSystem);
    String destination = attributes.get(SemanticAttributes.MESSAGING_DESTINATION);
    telemetry.setTarget(destination != null ? destination : messagingSystem);
}
// Event Hubs dependency: fixed type plus "peerAddress/destination" target.
private static void applyEventHubsSpan(Attributes attributes, RemoteDependencyData telemetry) {
telemetry.setType("Microsoft.EventHub");
telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
// Service Bus dependency: fixed type plus "peerAddress/destination" target.
private static void applyServiceBusSpan(Attributes attributes, RemoteDependencyData telemetry) {
telemetry.setType("AZURE SERVICE BUS");
telemetry.setTarget(getAzureSdkTargetSource(attributes));
}
// "peerAddress/destination" from the Azure SDK attributes; a missing attribute shows up
// as the literal "null" in the concatenation.
private static String getAzureSdkTargetSource(Attributes attributes) {
String peerAddress = attributes.get(AZURE_SDK_PEER_ADDRESS);
String destination = attributes.get(AZURE_SDK_MESSAGE_BUS_DESTINATION);
return peerAddress + "/" + destination;
}
// Default network port for each known db.system; 0 for unknown systems so any explicit
// port is always shown in the dependency target.
private static int getDefaultPortForDbSystem(String dbSystem) {
switch (dbSystem) {
case SemanticAttributes.DbSystemValues.MONGODB:
return 27017;
case SemanticAttributes.DbSystemValues.CASSANDRA:
return 9042;
case SemanticAttributes.DbSystemValues.REDIS:
return 6379;
case SemanticAttributes.DbSystemValues.MARIADB:
case SemanticAttributes.DbSystemValues.MYSQL:
return 3306;
case SemanticAttributes.DbSystemValues.MSSQL:
return 1433;
case SemanticAttributes.DbSystemValues.DB2:
return 50000;
case SemanticAttributes.DbSystemValues.ORACLE:
return 1521;
case SemanticAttributes.DbSystemValues.H2:
return 8082;
case SemanticAttributes.DbSystemValues.DERBY:
return 1527;
case SemanticAttributes.DbSystemValues.POSTGRESQL:
return 5432;
default:
return 0;
}
}
/**
 * Converts a SERVER/CONSUMER span into an Application Insights Request telemetry item
 * and appends it (plus any span events) to {@code telemetryItems}.
 */
private void exportRequest(SpanData span, List<TelemetryItem> telemetryItems) {
    TelemetryItem telemetry = new TelemetryItem();
    RequestData data = new RequestData();
    initTelemetry(telemetry, data, "Request", "RequestData");
    data.setProperties(new HashMap<>());
    Attributes attributes = span.getAttributes();
    long startEpochNanos = span.getStartEpochNanos();
    // FIX: removed the unused local "float samplingPercentage = 100;".
    data.setId(span.getSpanId());
    setTime(telemetry, startEpochNanos);
    setExtraAttributes(telemetry, data.getProperties(), attributes);
    addLinks(data.getProperties(), span.getLinks());
    String operationName = getOperationName(span);
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName);
    telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId());
    telemetry
        .getTags()
        .put(
            ContextTagKeys.AI_OPERATION_PARENT_ID.toString(),
            span.getParentSpanContext().getSpanId());
    data.setName(operationName);
    data.setDuration(FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos));
    data.setSuccess(getSuccess(span));
    String httpUrl = getHttpUrlFromServerSpan(attributes);
    if (httpUrl != null) {
        data.setUrl(httpUrl);
    }
    // Fall back to the gRPC status code when no HTTP status code was recorded.
    Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE);
    if (httpStatusCode == null) {
        httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE);
    }
    if (httpStatusCode != null) {
        data.setResponseCode(Long.toString(httpStatusCode));
    } else {
        data.setResponseCode("0");
    }
    String locationIp = attributes.get(SemanticAttributes.HTTP_CLIENT_IP);
    if (locationIp == null) {
        locationIp = attributes.get(SemanticAttributes.NET_PEER_IP);
    }
    if (locationIp != null) {
        telemetry.getTags().put(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp);
    }
    data.setSource(getSource(attributes));
    // Azure SDK messaging: queue-time measurement, clamped to >= 0 for clock skew.
    // NOTE(review): SECONDS.toMillis implies the attribute is epoch seconds — TODO confirm.
    Long enqueuedTime = attributes.get(AZURE_SDK_ENQUEUED_TIME);
    if (enqueuedTime != null) {
        long timeSinceEnqueuedMillis =
            Math.max(
                0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime));
        if (data.getMeasurements() == null) {
            data.setMeasurements(new HashMap<>());
        }
        data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
    }
    // Kafka reports queue time directly in milliseconds; overwrites the Azure-derived
    // value when both attributes are present (same measurement key).
    Long timeSinceEnqueuedMillis = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS);
    if (timeSinceEnqueuedMillis != null) {
        if (data.getMeasurements() == null) {
            data.setMeasurements(new HashMap<>());
        }
        data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
    }
    telemetryItems.add(telemetry);
    exportEvents(span, operationName, telemetryItems);
}
// Maps the span status onto the Application Insights success flag.
private boolean getSuccess(SpanData span) {
switch (span.getStatus().getStatusCode()) {
case ERROR:
return false;
case OK:
return true;
case UNSET:
// No explicit status: infer from the HTTP status code; absent or < 400 is success.
Long statusCode = span.getAttributes().get(SemanticAttributes.HTTP_STATUS_CODE);
return statusCode == null || statusCode < 400;
default:
return true;
}
}
@Nullable
private static String getHttpUrlFromServerSpan(Attributes attributes) {
String httpUrl = attributes.get(SemanticAttributes.HTTP_URL);
if (httpUrl != null) {
return httpUrl;
}
String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME);
if (scheme == null) {
return null;
}
String host = attributes.get(SemanticAttributes.HTTP_HOST);
if (host == null) {
return null;
}
String target = attributes.get(SemanticAttributes.HTTP_TARGET);
if (target == null) {
return null;
}
return scheme + ":
}
// Derives the request "source" (caller) for messaging consumers; null when the span
// carries no messaging attributes at all.
private static String getSource(Attributes attributes) {
// Azure Event Hubs / Service Bus spans carry their own peer-address attributes.
if (isAzureQueue(attributes)) {
return getAzureSdkTargetSource(attributes);
}
String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM);
if (messagingSystem != null) {
String source =
nullAwareConcat(
getTargetFromPeerAttributes(attributes, 0),
attributes.get(SemanticAttributes.MESSAGING_DESTINATION),
"/");
if (source != null) {
return source;
}
return messagingSystem;
}
return null;
}
// True when the span was emitted by the Azure Event Hubs or Service Bus SDK.
private static boolean isAzureQueue(Attributes attributes) {
String azureNamespace = attributes.get(AZURE_NAMESPACE);
return "Microsoft.EventHub".equals(azureNamespace)
|| "Microsoft.ServiceBus".equals(azureNamespace);
}
// Operation name: "METHOD /path" for spans named by an HTTP route, else the raw span name.
private static String getOperationName(SpanData span) {
String spanName = span.getName();
String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD);
if (httpMethod != null && !httpMethod.isEmpty() && spanName.startsWith("/")) {
return httpMethod + " " + spanName;
}
return spanName;
}
// Joins two strings with the separator; if either side is null, returns the other as-is
// (which may itself be null when both are).
private static String nullAwareConcat(String str1, String str2, String separator) {
if (str1 == null) {
return str2;
}
if (str2 == null) {
return str1;
}
return str1 + separator + str2;
}
// Emits the span's events as Message telemetry. Exception events are converted to
// Exception telemetry instead, and the loop stops at the first exception event.
private void exportEvents(SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) {
for (EventData event : span.getEvents()) {
if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null
|| event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) {
String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE);
if (stacktrace != null) {
trackException(stacktrace, span, operationName, telemetryItems);
}
// NOTE(review): returns (not continues), dropping later events — confirm intentional.
return;
}
TelemetryItem telemetry = new TelemetryItem();
MessageData data = new MessageData();
initTelemetry(telemetry, data, "Message", "MessageData");
data.setProperties(new HashMap<>());
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getSpanId());
// Request spans pass their computed operation name; dependency spans pass null.
if (operationName != null) {
setOperationName(telemetry, operationName);
} else {
setOperationName(telemetry, span.getAttributes());
}
setTime(telemetry, event.getEpochNanos());
setExtraAttributes(telemetry, data.getProperties(), event.getAttributes());
data.setMessage(event.getName());
telemetryItems.add(telemetry);
}
}
// Converts a stack trace captured on a span event into Exception telemetry tied to the span.
private void trackException(String errorStack, SpanData span, @Nullable String operationName,
List<TelemetryItem> telemetryItems) {
TelemetryItem telemetry = new TelemetryItem();
TelemetryExceptionData data = new TelemetryExceptionData();
initTelemetry(telemetry, data, "Exception", "ExceptionData");
data.setProperties(new HashMap<>());
setOperationId(telemetry, span.getTraceId());
setOperationParentId(telemetry, span.getSpanId());
// Prefer the caller-supplied operation name; otherwise use the propagated attribute.
if (operationName != null) {
setOperationName(telemetry, operationName);
} else {
setOperationName(telemetry, span.getAttributes());
}
// The exception is stamped with the span's end time.
setTime(telemetry, span.getEndEpochNanos());
data.setExceptions(Exceptions.minimalParse(errorStack));
telemetryItems.add(telemetry);
}
// Populates the envelope fields shared by every telemetry type (version, name, ikey,
// SDK-version tag) and wires "data" in as the envelope's base data under "baseType".
private void initTelemetry(TelemetryItem telemetry, MonitorDomain data, String telemetryName,
String baseType) {
telemetry.setVersion(1);
telemetry.setName(telemetryName);
telemetry.setInstrumentationKey(instrumentationKey);
telemetry.setTags(new HashMap<>());
telemetry.getTags().put(ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
data.setVersion(2);
MonitorBase monitorBase = new MonitorBase();
telemetry.setData(monitorBase);
monitorBase.setBaseType(baseType);
monitorBase.setBaseData(data);
}
// Stamps the envelope with the given wall-clock time (epoch nanos -> formatted offset time).
private static void setTime(TelemetryItem telemetry, long epochNanos) {
telemetry.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos));
}
// Serializes span links as a compact JSON array into the "_MS.links" custom property
// (the format Application Insights expects). Writes nothing when there are no links.
private static void addLinks(Map<String, String> properties, List<LinkData> links) {
if (links.isEmpty()) {
return;
}
StringBuilder sb = new StringBuilder();
sb.append("[");
boolean first = true;
for (LinkData link : links) {
if (!first) {
sb.append(",");
}
sb.append("{\"operation_Id\":\"");
sb.append(link.getSpanContext().getTraceId());
sb.append("\",\"id\":\"");
sb.append(link.getSpanContext().getSpanId());
sb.append("\"}");
first = false;
}
sb.append("]");
properties.put("_MS.links", sb.toString());
}
/**
 * Copies non-standard span attributes into the telemetry's custom properties.
 * Azure SDK and Kafka internals are skipped (they map to dedicated fields/measurements),
 * enduser.id and http.user_agent map to envelope tags, and any attribute whose prefix is
 * covered by a standard telemetry field is dropped.
 */
private static void setExtraAttributes(TelemetryItem telemetry, Map<String, String> properties,
    Attributes attributes) {
    attributes.forEach((key, value) -> {
        String stringKey = key.getKey();
        // Azure SDK internal attributes are mapped elsewhere; don't duplicate them here.
        if (stringKey.equals(AZURE_NAMESPACE.getKey())
            || stringKey.equals(AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey())
            || stringKey.equals(AZURE_SDK_ENQUEUED_TIME.getKey())) {
            return;
        }
        // Kafka queue-time/offset are captured as measurements, not custom properties.
        if (stringKey.equals(KAFKA_RECORD_QUEUE_TIME_MS.getKey())
            || stringKey.equals(KAFKA_OFFSET.getKey())) {
            return;
        }
        if (stringKey.equals(SemanticAttributes.ENDUSER_ID.getKey()) && value instanceof String) {
            telemetry.getTags().put(ContextTagKeys.AI_USER_ID.toString(), (String) value);
            return;
        }
        if (stringKey.equals(SemanticAttributes.HTTP_USER_AGENT.getKey())
            && value instanceof String) {
            telemetry.getTags().put("ai.user.userAgent", (String) value);
            return;
        }
        int index = stringKey.indexOf(".");
        String prefix = index == -1 ? stringKey : stringKey.substring(0, index);
        if (STANDARD_ATTRIBUTE_PREFIXES.contains(prefix)) {
            return;
        }
        String val = convertToString(value, key.getType());
        // FIX: guard on the converted value ("val"), not the raw "value" — convertToString
        // can return null for unexpected attribute types, and the original check allowed a
        // null to be stored in the properties map.
        if (val != null) {
            properties.put(key.getKey(), val);
        }
    });
}
// Renders an attribute value as a string: scalars via String.valueOf, arrays as a
// comma-joined list; null (with a warning) for any unknown attribute type.
@Nullable
private static String convertToString(Object value, AttributeType type) {
switch (type) {
case STRING:
case BOOLEAN:
case LONG:
case DOUBLE:
return String.valueOf(value);
case STRING_ARRAY:
case BOOLEAN_ARRAY:
case LONG_ARRAY:
case DOUBLE_ARRAY:
return join((List<?>) value);
default:
LOGGER.warning("unexpected attribute type: {}", type);
return null;
}
}
// Comma-joins the list elements ("a, b, c"); empty string for an empty list.
private static <T> String join(List<T> values) {
StringBuilder sb = new StringBuilder();
for (Object val : values) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append(val);
}
return sb.toString();
}
} | class AzureMonitorTraceExporter implements SpanExporter {
// db.system values that map to SQL-flavored dependency types.
private static final Set<String> SQL_DB_SYSTEMS;
// Attribute prefixes covered by standard telemetry fields; excluded from custom properties.
private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES;
// Operation name propagated on internal spans by the agent.
private static final AttributeKey<String> AI_OPERATION_NAME_KEY =
AttributeKey.stringKey("applicationinsights.internal.operation_name");
// Azure SDK tracing attributes.
private static final AttributeKey<String> AZURE_NAMESPACE =
AttributeKey.stringKey("az.namespace");
private static final AttributeKey<String> AZURE_SDK_PEER_ADDRESS =
AttributeKey.stringKey("peer.address");
private static final AttributeKey<String> AZURE_SDK_MESSAGE_BUS_DESTINATION =
AttributeKey.stringKey("message_bus.destination");
// Enqueued time of a received message. NOTE(review): consumed with SECONDS.toMillis in
// exportRequest, implying epoch seconds — confirm against the producing SDK.
private static final AttributeKey<Long> AZURE_SDK_ENQUEUED_TIME =
AttributeKey.longKey("x-opt-enqueued-time");
// Kafka instrumentation attributes.
private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS =
longKey("kafka.record.queue_time_ms");
private static final AttributeKey<Long> KAFKA_OFFSET = longKey("kafka.offset");
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorTraceExporter.class);
static {
Set<String> dbSystems = new HashSet<>();
dbSystems.add(SemanticAttributes.DbSystemValues.DB2);
dbSystems.add(SemanticAttributes.DbSystemValues.DERBY);
dbSystems.add(SemanticAttributes.DbSystemValues.MARIADB);
dbSystems.add(SemanticAttributes.DbSystemValues.MSSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.MYSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.ORACLE);
dbSystems.add(SemanticAttributes.DbSystemValues.POSTGRESQL);
dbSystems.add(SemanticAttributes.DbSystemValues.SQLITE);
dbSystems.add(SemanticAttributes.DbSystemValues.OTHER_SQL);
dbSystems.add(SemanticAttributes.DbSystemValues.HSQLDB);
dbSystems.add(SemanticAttributes.DbSystemValues.H2);
SQL_DB_SYSTEMS = Collections.unmodifiableSet(dbSystems);
Set<String> standardAttributesPrefix = new HashSet<>();
standardAttributesPrefix.add("http");
standardAttributesPrefix.add("db");
standardAttributesPrefix.add("message");
standardAttributesPrefix.add("messaging");
standardAttributesPrefix.add("rpc");
standardAttributesPrefix.add("enduser");
standardAttributesPrefix.add("net");
standardAttributesPrefix.add("peer");
standardAttributesPrefix.add("exception");
standardAttributesPrefix.add("thread");
standardAttributesPrefix.add("faas");
STANDARD_ATTRIBUTE_PREFIXES = Collections.unmodifiableSet(standardAttributesPrefix);
}
// Client used to ship telemetry, and the instrumentation key stamped on every item.
private final MonitorExporterAsyncClient client;
private final String instrumentationKey;
/**
 * Creates an instance of exporter that is configured with given exporter client that sends telemetry events to
 * Application Insights resource identified by the instrumentation key.
 *
 * @param client The client used to send data to Azure Monitor.
 * @param instrumentationKey The instrumentation key of Application Insights resource.
 */
AzureMonitorTraceExporter(MonitorExporterAsyncClient client, String instrumentationKey) {
// NOTE(review): no null checks here; callers are expected to pass non-null values.
this.client = client;
this.instrumentationKey = instrumentationKey;
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableResultCode export(Collection<SpanData> spans) {
CompletableResultCode completableResultCode = new CompletableResultCode();
try {
List<TelemetryItem> telemetryItems = new ArrayList<>();
for (SpanData span : spans) {
LOGGER.verbose("exporting span: {}", span);
export(span, telemetryItems);
}
// Disable tracing on the export call itself so the exporter's own HTTP request does
// not produce spans that would in turn be exported (infinite recursion).
client.export(telemetryItems)
.subscriberContext(Context.of(Tracer.DISABLE_TRACING_KEY, true))
.subscribe(ignored -> { }, error -> completableResultCode.fail(), completableResultCode::succeed);
return completableResultCode;
} catch (Throwable t) {
// Any conversion error fails the whole batch; log it rather than propagate.
LOGGER.error(t.getMessage(), t);
return completableResultCode.fail();
}
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableResultCode flush() {
// This exporter buffers nothing locally, so flush reports success immediately.
return CompletableResultCode.ofSuccess();
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode shutdown() {
return CompletableResultCode.ofSuccess();
} | class AzureMonitorTraceExporter implements SpanExporter {
private static final Set<String> SQL_DB_SYSTEMS;
private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES;
private static final AttributeKey<String> AI_OPERATION_NAME_KEY =
AttributeKey.stringKey("applicationinsights.internal.operation_name");
private static final AttributeKey<String> AZURE_NAMESPACE =
AttributeKey.stringKey("az.namespace");
private static final AttributeKey<String> AZURE_SDK_PEER_ADDRESS =
AttributeKey.stringKey("peer.address");
private static final AttributeKey<String> AZURE_SDK_MESSAGE_BUS_DESTINATION =
AttributeKey.stringKey("message_bus.destination");
private static final AttributeKey<Long> AZURE_SDK_ENQUEUED_TIME =
AttributeKey.longKey("x-opt-enqueued-time");
private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS =
longKey("kafka.record.queue_time_ms");
private static final AttributeKey<Long> KAFKA_OFFSET = longKey("kafka.offset");
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorTraceExporter.class);
static {
Set<String> dbSystems = new HashSet<>();
dbSystems.add(SemanticAttributes.DbSystemValues.DB2);
dbSystems.add(SemanticAttributes.DbSystemValues.DERBY);
dbSystems.add(SemanticAttributes.DbSystemValues.MARIADB);
dbSystems.add(SemanticAttributes.DbSystemValues.MSSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.MYSQL);
dbSystems.add(SemanticAttributes.DbSystemValues.ORACLE);
dbSystems.add(SemanticAttributes.DbSystemValues.POSTGRESQL);
dbSystems.add(SemanticAttributes.DbSystemValues.SQLITE);
dbSystems.add(SemanticAttributes.DbSystemValues.OTHER_SQL);
dbSystems.add(SemanticAttributes.DbSystemValues.HSQLDB);
dbSystems.add(SemanticAttributes.DbSystemValues.H2);
SQL_DB_SYSTEMS = Collections.unmodifiableSet(dbSystems);
Set<String> standardAttributesPrefix = new HashSet<>();
standardAttributesPrefix.add("http");
standardAttributesPrefix.add("db");
standardAttributesPrefix.add("message");
standardAttributesPrefix.add("messaging");
standardAttributesPrefix.add("rpc");
standardAttributesPrefix.add("enduser");
standardAttributesPrefix.add("net");
standardAttributesPrefix.add("peer");
standardAttributesPrefix.add("exception");
standardAttributesPrefix.add("thread");
standardAttributesPrefix.add("faas");
STANDARD_ATTRIBUTE_PREFIXES = Collections.unmodifiableSet(standardAttributesPrefix);
}
private final MonitorExporterAsyncClient client;
private final String instrumentationKey;
/**
* Creates an instance of exporter that is configured with given exporter client that sends telemetry events to
* Application Insights resource identified by the instrumentation key.
*
* @param client The client used to send data to Azure Monitor.
* @param instrumentationKey The instrumentation key of Application Insights resource.
*/
AzureMonitorTraceExporter(MonitorExporterAsyncClient client, String instrumentationKey) {
this.client = client;
this.instrumentationKey = instrumentationKey;
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode export(Collection<SpanData> spans) {
CompletableResultCode completableResultCode = new CompletableResultCode();
try {
List<TelemetryItem> telemetryItems = new ArrayList<>();
for (SpanData span : spans) {
LOGGER.verbose("exporting span: {}", span);
export(span, telemetryItems);
}
client.export(telemetryItems)
.subscriberContext(Context.of(Tracer.DISABLE_TRACING_KEY, true))
.subscribe(ignored -> { }, error -> completableResultCode.fail(), completableResultCode::succeed);
return completableResultCode;
} catch (Throwable t) {
LOGGER.error(t.getMessage(), t);
return completableResultCode.fail();
}
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode flush() {
return CompletableResultCode.ofSuccess();
}
/**
* {@inheritDoc}
*/
@Override
public CompletableResultCode shutdown() {
return CompletableResultCode.ofSuccess();
} |
Use `UncheckedIOException` instead. | public ResourceDeleteCancelEventData setAuthorization(String authorization) {
try {
setResourceAuthorization(
defaultSerializerAdapter.deserialize(authorization, ResourceAuthorization.class,
SerializerEncoding.JSON));
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return this;
} | throw LOGGER.logExceptionAsError(new RuntimeException(e)); | public ResourceDeleteCancelEventData setAuthorization(String authorization) {
try {
setResourceAuthorization(
DEFAULT_SERIALIZER_ADAPTER.deserialize(authorization, ResourceAuthorization.class,
SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
return this;
} | class ResourceDeleteCancelEventData {
static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class);
final SerializerAdapter defaultSerializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
/*
* The tenant ID of the resource.
*/
@JsonProperty(value = "tenantId")
private String tenantId;
/*
* The subscription ID of the resource.
*/
@JsonProperty(value = "subscriptionId")
private String subscriptionId;
/*
* The resource group of the resource.
*/
@JsonProperty(value = "resourceGroup")
private String resourceGroup;
/*
* The resource provider performing the operation.
*/
@JsonProperty(value = "resourceProvider")
private String resourceProvider;
/*
* The URI of the resource in the operation.
*/
@JsonProperty(value = "resourceUri")
private String resourceUri;
/*
* The operation that was performed.
*/
@JsonProperty(value = "operationName")
private String operationName;
/*
* The status of the operation.
*/
@JsonProperty(value = "status")
private String status;
private String authorizationString;
/*
* The requested authorization for the operation.
*/
@JsonProperty(value = "authorization")
private ResourceAuthorization authorization;
private String claimsString;
/*
* The properties of the claims.
*/
@JsonProperty(value = "claims")
private Map<String, String> claims;
/*
* An operation ID used for troubleshooting.
*/
@JsonProperty(value = "correlationId")
private String correlationId;
private String httpRequestString;
/*
* The details of the operation.
*/
@JsonProperty(value = "httpRequest")
private ResourceHttpRequest httpRequest;
/**
* Get the tenantId property: The tenant ID of the resource.
*
* @return the tenantId value.
*/
public String getTenantId() {
return this.tenantId;
}
/**
* Set the tenantId property: The tenant ID of the resource.
*
* @param tenantId the tenantId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setTenantId(String tenantId) {
this.tenantId = tenantId;
return this;
}
/**
* Get the subscriptionId property: The subscription ID of the resource.
*
* @return the subscriptionId value.
*/
public String getSubscriptionId() {
return this.subscriptionId;
}
/**
* Set the subscriptionId property: The subscription ID of the resource.
*
* @param subscriptionId the subscriptionId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) {
this.subscriptionId = subscriptionId;
return this;
}
/**
* Get the resourceGroup property: The resource group of the resource.
*
* @return the resourceGroup value.
*/
public String getResourceGroup() {
return this.resourceGroup;
}
/**
* Set the resourceGroup property: The resource group of the resource.
*
* @param resourceGroup the resourceGroup value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) {
this.resourceGroup = resourceGroup;
return this;
}
/**
* Get the resourceProvider property: The resource provider performing the operation.
*
* @return the resourceProvider value.
*/
public String getResourceProvider() {
return this.resourceProvider;
}
/**
* Set the resourceProvider property: The resource provider performing the operation.
*
* @param resourceProvider the resourceProvider value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) {
this.resourceProvider = resourceProvider;
return this;
}
/**
* Get the resourceUri property: The URI of the resource in the operation.
*
* @return the resourceUri value.
*/
public String getResourceUri() {
return this.resourceUri;
}
/**
* Set the resourceUri property: The URI of the resource in the operation.
*
* @param resourceUri the resourceUri value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceUri(String resourceUri) {
this.resourceUri = resourceUri;
return this;
}
/**
* Get the operationName property: The operation that was performed.
*
* @return the operationName value.
*/
public String getOperationName() {
return this.operationName;
}
/**
* Set the operationName property: The operation that was performed.
*
* @param operationName the operationName value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setOperationName(String operationName) {
this.operationName = operationName;
return this;
}
/**
* Get the status property: The status of the operation.
*
* @return the status value.
*/
public String getStatus() {
return this.status;
}
/**
* Set the status property: The status of the operation.
*
* @param status the status value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setStatus(String status) {
this.status = status;
return this;
}
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getAuthorization() {
final ResourceAuthorization resourceAuthorization = getResourceAuthorization();
try {
return defaultSerializerAdapter.serialize(resourceAuthorization, SerializerEncoding.JSON);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
*/
public ResourceAuthorization getResourceAuthorization() {
return this.authorization;
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) {
this.authorization = authorization;
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getClaims() {
final Map<String, String> resourceClaims = getResourceClaims();
if (!resourceClaims.isEmpty()) {
try {
return defaultSerializerAdapter.serialize(resourceClaims, SerializerEncoding.JSON);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
return null;
}
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setClaims(String claims) {
try {
setResourceClaims(defaultSerializerAdapter.deserialize(claims, Map.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
*/
public Map<String, String> getResourceClaims() {
return this.claims;
}
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) {
this.claims = claims;
return this;
}
/**
* Get the correlationId property: An operation ID used for troubleshooting.
*
* @return the correlationId value.
*/
public String getCorrelationId() {
return this.correlationId;
}
/**
* Set the correlationId property: An operation ID used for troubleshooting.
*
* @param correlationId the correlationId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setCorrelationId(String correlationId) {
this.correlationId = correlationId;
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getHttpRequest() {
ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest();
try {
return defaultSerializerAdapter.serialize(resourceHttpRequest, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) {
try {
setResourceHttpRequest(
defaultSerializerAdapter.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
*/
public ResourceHttpRequest getResourceHttpRequest() {
return this.httpRequest;
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) {
this.httpRequest = httpRequest;
return this;
}
} | class ResourceDeleteCancelEventData {
private static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class);
private static final SerializerAdapter DEFAULT_SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
/*
* The tenant ID of the resource.
*/
@JsonProperty(value = "tenantId")
private String tenantId;
/*
* The subscription ID of the resource.
*/
@JsonProperty(value = "subscriptionId")
private String subscriptionId;
/*
* The resource group of the resource.
*/
@JsonProperty(value = "resourceGroup")
private String resourceGroup;
/*
* The resource provider performing the operation.
*/
@JsonProperty(value = "resourceProvider")
private String resourceProvider;
/*
* The URI of the resource in the operation.
*/
@JsonProperty(value = "resourceUri")
private String resourceUri;
/*
* The operation that was performed.
*/
@JsonProperty(value = "operationName")
private String operationName;
/*
* The status of the operation.
*/
@JsonProperty(value = "status")
private String status;
/*
* The requested authorization for the operation.
*/
@JsonProperty(value = "authorization")
private ResourceAuthorization authorization;
/*
* The properties of the claims.
*/
@JsonProperty(value = "claims")
private Map<String, String> claims;
/*
* An operation ID used for troubleshooting.
*/
@JsonProperty(value = "correlationId")
private String correlationId;
/*
* The details of the operation.
*/
@JsonProperty(value = "httpRequest")
private ResourceHttpRequest httpRequest;
/**
* Get the tenantId property: The tenant ID of the resource.
*
* @return the tenantId value.
*/
public String getTenantId() {
return this.tenantId;
}
/**
* Set the tenantId property: The tenant ID of the resource.
*
* @param tenantId the tenantId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setTenantId(String tenantId) {
this.tenantId = tenantId;
return this;
}
/**
* Get the subscriptionId property: The subscription ID of the resource.
*
* @return the subscriptionId value.
*/
public String getSubscriptionId() {
return this.subscriptionId;
}
/**
* Set the subscriptionId property: The subscription ID of the resource.
*
* @param subscriptionId the subscriptionId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) {
this.subscriptionId = subscriptionId;
return this;
}
/**
* Get the resourceGroup property: The resource group of the resource.
*
* @return the resourceGroup value.
*/
public String getResourceGroup() {
return this.resourceGroup;
}
/**
* Set the resourceGroup property: The resource group of the resource.
*
* @param resourceGroup the resourceGroup value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) {
this.resourceGroup = resourceGroup;
return this;
}
/**
* Get the resourceProvider property: The resource provider performing the operation.
*
* @return the resourceProvider value.
*/
public String getResourceProvider() {
return this.resourceProvider;
}
/**
* Set the resourceProvider property: The resource provider performing the operation.
*
* @param resourceProvider the resourceProvider value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) {
this.resourceProvider = resourceProvider;
return this;
}
/**
* Get the resourceUri property: The URI of the resource in the operation.
*
* @return the resourceUri value.
*/
public String getResourceUri() {
return this.resourceUri;
}
/**
* Set the resourceUri property: The URI of the resource in the operation.
*
* @param resourceUri the resourceUri value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceUri(String resourceUri) {
this.resourceUri = resourceUri;
return this;
}
/**
* Get the operationName property: The operation that was performed.
*
* @return the operationName value.
*/
public String getOperationName() {
return this.operationName;
}
/**
* Set the operationName property: The operation that was performed.
*
* @param operationName the operationName value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setOperationName(String operationName) {
this.operationName = operationName;
return this;
}
/**
* Get the status property: The status of the operation.
*
* @return the status value.
*/
public String getStatus() {
return this.status;
}
/**
* Set the status property: The status of the operation.
*
* @param status the status value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setStatus(String status) {
this.status = status;
return this;
}
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getAuthorization() {
final ResourceAuthorization resourceAuthorization = getResourceAuthorization();
try {
return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceAuthorization, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
*/
public ResourceAuthorization getResourceAuthorization() {
return this.authorization;
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) {
this.authorization = authorization;
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getClaims() {
final Map<String, String> resourceClaims = getResourceClaims();
if (!resourceClaims.isEmpty()) {
try {
return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceClaims, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
return null;
}
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setClaims(String claims) {
try {
setResourceClaims(DEFAULT_SERIALIZER_ADAPTER.deserialize(claims, Map.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
*/
public Map<String, String> getResourceClaims() {
return this.claims;
}
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) {
this.claims = claims;
return this;
}
/**
* Get the correlationId property: An operation ID used for troubleshooting.
*
* @return the correlationId value.
*/
public String getCorrelationId() {
return this.correlationId;
}
/**
* Set the correlationId property: An operation ID used for troubleshooting.
*
* @param correlationId the correlationId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setCorrelationId(String correlationId) {
this.correlationId = correlationId;
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getHttpRequest() {
ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest();
try {
return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceHttpRequest, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) {
try {
setResourceHttpRequest(
DEFAULT_SERIALIZER_ADAPTER.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
*/
public ResourceHttpRequest getResourceHttpRequest() {
return this.httpRequest;
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) {
this.httpRequest = httpRequest;
return this;
}
} |
Add unit test for this as we should make sure the map is getting serialized and deserialized correctly. | public String getClaims() {
final Map<String, String> resourceClaims = getResourceClaims();
if (!resourceClaims.isEmpty()) {
try {
return defaultSerializerAdapter.serialize(resourceClaims, SerializerEncoding.JSON);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
return null;
} | return defaultSerializerAdapter.serialize(resourceClaims, SerializerEncoding.JSON); | public String getClaims() {
final Map<String, String> resourceClaims = getResourceClaims();
if (!resourceClaims.isEmpty()) {
try {
return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceClaims, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
return null;
} | class ResourceDeleteCancelEventData {
static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class);
final SerializerAdapter defaultSerializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
/*
* The tenant ID of the resource.
*/
@JsonProperty(value = "tenantId")
private String tenantId;
/*
* The subscription ID of the resource.
*/
@JsonProperty(value = "subscriptionId")
private String subscriptionId;
/*
* The resource group of the resource.
*/
@JsonProperty(value = "resourceGroup")
private String resourceGroup;
/*
* The resource provider performing the operation.
*/
@JsonProperty(value = "resourceProvider")
private String resourceProvider;
/*
* The URI of the resource in the operation.
*/
@JsonProperty(value = "resourceUri")
private String resourceUri;
/*
* The operation that was performed.
*/
@JsonProperty(value = "operationName")
private String operationName;
/*
* The status of the operation.
*/
@JsonProperty(value = "status")
private String status;
private String authorizationString;
/*
* The requested authorization for the operation.
*/
@JsonProperty(value = "authorization")
private ResourceAuthorization authorization;
private String claimsString;
/*
* The properties of the claims.
*/
@JsonProperty(value = "claims")
private Map<String, String> claims;
/*
* An operation ID used for troubleshooting.
*/
@JsonProperty(value = "correlationId")
private String correlationId;
private String httpRequestString;
/*
* The details of the operation.
*/
@JsonProperty(value = "httpRequest")
private ResourceHttpRequest httpRequest;
/**
* Get the tenantId property: The tenant ID of the resource.
*
* @return the tenantId value.
*/
public String getTenantId() {
return this.tenantId;
}
/**
* Set the tenantId property: The tenant ID of the resource.
*
* @param tenantId the tenantId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setTenantId(String tenantId) {
this.tenantId = tenantId;
return this;
}
/**
* Get the subscriptionId property: The subscription ID of the resource.
*
* @return the subscriptionId value.
*/
public String getSubscriptionId() {
return this.subscriptionId;
}
/**
* Set the subscriptionId property: The subscription ID of the resource.
*
* @param subscriptionId the subscriptionId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) {
this.subscriptionId = subscriptionId;
return this;
}
/**
* Get the resourceGroup property: The resource group of the resource.
*
* @return the resourceGroup value.
*/
public String getResourceGroup() {
return this.resourceGroup;
}
/**
* Set the resourceGroup property: The resource group of the resource.
*
* @param resourceGroup the resourceGroup value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) {
this.resourceGroup = resourceGroup;
return this;
}
/**
* Get the resourceProvider property: The resource provider performing the operation.
*
* @return the resourceProvider value.
*/
public String getResourceProvider() {
return this.resourceProvider;
}
/**
* Set the resourceProvider property: The resource provider performing the operation.
*
* @param resourceProvider the resourceProvider value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) {
this.resourceProvider = resourceProvider;
return this;
}
/**
* Get the resourceUri property: The URI of the resource in the operation.
*
* @return the resourceUri value.
*/
public String getResourceUri() {
return this.resourceUri;
}
/**
* Set the resourceUri property: The URI of the resource in the operation.
*
* @param resourceUri the resourceUri value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceUri(String resourceUri) {
this.resourceUri = resourceUri;
return this;
}
/**
* Get the operationName property: The operation that was performed.
*
* @return the operationName value.
*/
public String getOperationName() {
return this.operationName;
}
/**
* Set the operationName property: The operation that was performed.
*
* @param operationName the operationName value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setOperationName(String operationName) {
this.operationName = operationName;
return this;
}
/**
* Get the status property: The status of the operation.
*
* @return the status value.
*/
public String getStatus() {
return this.status;
}
/**
* Set the status property: The status of the operation.
*
* @param status the status value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setStatus(String status) {
this.status = status;
return this;
}
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getAuthorization() {
final ResourceAuthorization resourceAuthorization = getResourceAuthorization();
try {
return defaultSerializerAdapter.serialize(resourceAuthorization, SerializerEncoding.JSON);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setAuthorization(String authorization) {
try {
setResourceAuthorization(
defaultSerializerAdapter.deserialize(authorization, ResourceAuthorization.class,
SerializerEncoding.JSON));
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return this;
}
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
*/
public ResourceAuthorization getResourceAuthorization() {
return this.authorization;
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) {
this.authorization = authorization;
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setClaims(String claims) {
try {
setResourceClaims(defaultSerializerAdapter.deserialize(claims, Map.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
*/
public Map<String, String> getResourceClaims() {
return this.claims;
}
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) {
this.claims = claims;
return this;
}
/**
* Get the correlationId property: An operation ID used for troubleshooting.
*
* @return the correlationId value.
*/
public String getCorrelationId() {
return this.correlationId;
}
/**
* Set the correlationId property: An operation ID used for troubleshooting.
*
* @param correlationId the correlationId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setCorrelationId(String correlationId) {
this.correlationId = correlationId;
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getHttpRequest() {
ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest();
try {
return defaultSerializerAdapter.serialize(resourceHttpRequest, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) {
try {
setResourceHttpRequest(
defaultSerializerAdapter.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
*/
public ResourceHttpRequest getResourceHttpRequest() {
return this.httpRequest;
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) {
this.httpRequest = httpRequest;
return this;
}
} | class ResourceDeleteCancelEventData {
private static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class);
private static final SerializerAdapter DEFAULT_SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
/*
* The tenant ID of the resource.
*/
@JsonProperty(value = "tenantId")
private String tenantId;
/*
* The subscription ID of the resource.
*/
@JsonProperty(value = "subscriptionId")
private String subscriptionId;
/*
* The resource group of the resource.
*/
@JsonProperty(value = "resourceGroup")
private String resourceGroup;
/*
* The resource provider performing the operation.
*/
@JsonProperty(value = "resourceProvider")
private String resourceProvider;
/*
* The URI of the resource in the operation.
*/
@JsonProperty(value = "resourceUri")
private String resourceUri;
/*
* The operation that was performed.
*/
@JsonProperty(value = "operationName")
private String operationName;
/*
* The status of the operation.
*/
@JsonProperty(value = "status")
private String status;
/*
* The requested authorization for the operation.
*/
@JsonProperty(value = "authorization")
private ResourceAuthorization authorization;
/*
* The properties of the claims.
*/
@JsonProperty(value = "claims")
private Map<String, String> claims;
/*
* An operation ID used for troubleshooting.
*/
@JsonProperty(value = "correlationId")
private String correlationId;
/*
* The details of the operation.
*/
@JsonProperty(value = "httpRequest")
private ResourceHttpRequest httpRequest;
/**
* Get the tenantId property: The tenant ID of the resource.
*
* @return the tenantId value.
*/
public String getTenantId() {
return this.tenantId;
}
/**
* Set the tenantId property: The tenant ID of the resource.
*
* @param tenantId the tenantId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setTenantId(String tenantId) {
this.tenantId = tenantId;
return this;
}
/**
* Get the subscriptionId property: The subscription ID of the resource.
*
* @return the subscriptionId value.
*/
public String getSubscriptionId() {
return this.subscriptionId;
}
/**
* Set the subscriptionId property: The subscription ID of the resource.
*
* @param subscriptionId the subscriptionId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) {
this.subscriptionId = subscriptionId;
return this;
}
/**
* Get the resourceGroup property: The resource group of the resource.
*
* @return the resourceGroup value.
*/
public String getResourceGroup() {
return this.resourceGroup;
}
/**
* Set the resourceGroup property: The resource group of the resource.
*
* @param resourceGroup the resourceGroup value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) {
this.resourceGroup = resourceGroup;
return this;
}
/**
* Get the resourceProvider property: The resource provider performing the operation.
*
* @return the resourceProvider value.
*/
public String getResourceProvider() {
return this.resourceProvider;
}
/**
* Set the resourceProvider property: The resource provider performing the operation.
*
* @param resourceProvider the resourceProvider value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) {
this.resourceProvider = resourceProvider;
return this;
}
/**
* Get the resourceUri property: The URI of the resource in the operation.
*
* @return the resourceUri value.
*/
public String getResourceUri() {
return this.resourceUri;
}
/**
* Set the resourceUri property: The URI of the resource in the operation.
*
* @param resourceUri the resourceUri value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceUri(String resourceUri) {
this.resourceUri = resourceUri;
return this;
}
/**
* Get the operationName property: The operation that was performed.
*
* @return the operationName value.
*/
public String getOperationName() {
return this.operationName;
}
/**
* Set the operationName property: The operation that was performed.
*
* @param operationName the operationName value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setOperationName(String operationName) {
this.operationName = operationName;
return this;
}
/**
* Get the status property: The status of the operation.
*
* @return the status value.
*/
public String getStatus() {
return this.status;
}
/**
* Set the status property: The status of the operation.
*
* @param status the status value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setStatus(String status) {
this.status = status;
return this;
}
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getAuthorization() {
final ResourceAuthorization resourceAuthorization = getResourceAuthorization();
try {
return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceAuthorization, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setAuthorization(String authorization) {
try {
setResourceAuthorization(
DEFAULT_SERIALIZER_ADAPTER.deserialize(authorization, ResourceAuthorization.class,
SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
return this;
}
/**
* Get the authorization property: The requested authorization for the operation.
*
* @return the authorization value.
*/
public ResourceAuthorization getResourceAuthorization() {
return this.authorization;
}
/**
* Set the authorization property: The requested authorization for the operation.
*
* @param authorization the authorization value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) {
this.authorization = authorization;
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setClaims(String claims) {
try {
setResourceClaims(DEFAULT_SERIALIZER_ADAPTER.deserialize(claims, Map.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
return this;
}
/**
* Get the claims property: The properties of the claims.
*
* @return the claims value.
*/
public Map<String, String> getResourceClaims() {
return this.claims;
}
/**
* Set the claims property: The properties of the claims.
*
* @param claims the claims value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) {
this.claims = claims;
return this;
}
/**
* Get the correlationId property: An operation ID used for troubleshooting.
*
* @return the correlationId value.
*/
public String getCorrelationId() {
return this.correlationId;
}
/**
* Set the correlationId property: An operation ID used for troubleshooting.
*
* @param correlationId the correlationId value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setCorrelationId(String correlationId) {
this.correlationId = correlationId;
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public String getHttpRequest() {
ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest();
try {
return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceHttpRequest, SerializerEncoding.JSON);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
* @deprecated This method is no longer supported since v4.9.0.
* <p> Use {@link ResourceDeleteCancelEventData
*/
@Deprecated
public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) {
try {
setResourceHttpRequest(
DEFAULT_SERIALIZER_ADAPTER.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
return this;
}
/**
* Get the httpRequest property: The details of the operation.
*
* @return the httpRequest value.
*/
public ResourceHttpRequest getResourceHttpRequest() {
return this.httpRequest;
}
/**
* Set the httpRequest property: The details of the operation.
*
* @param httpRequest the httpRequest value to set.
* @return the ResourceDeleteCancelEventData object itself.
*/
public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) {
this.httpRequest = httpRequest;
return this;
}
} |
Instead of capturing and resetting the position a `readOnly` view can be passed into `MessageDigest`. https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html#asReadOnlyBuffer-- #Resolved | public static String byteArrayToHex(byte[] a) {
StringBuilder sb = new StringBuilder(a.length * 2);
for (byte b: a) {
sb.append(String.format("%02x", b));
}
return sb.toString();
} | for (byte b: a) { | public static String byteArrayToHex(byte[] a) {
StringBuilder sb = new StringBuilder(a.length * 2);
for (byte b: a) {
sb.append(String.format("%02x", b));
}
return sb.toString();
} | class UtilsImpl {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final int HTTP_STATUS_CODE_NOT_FOUND = 404;
private static final int HTTP_STATUS_CODE_ACCEPTED = 202;
public static final String OCI_MANIFEST_MEDIA_TYPE;
public static final String DOCKER_DIGEST_HEADER_NAME;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
}
/**
* This method builds the httpPipeline for the builders.
* @param clientOptions The client options
* @param logOptions http log options.
* @param configuration configuration settings.
* @param retryPolicy retry policy
* @param credential credentials.
* @param perCallPolicies per call policies.
* @param perRetryPolicies per retry policies.
* @param httpClient http client
* @param endpoint endpoint to be called
* @param serviceVersion the service api version being targeted by the client.
* @return returns the httpPipeline to be consumed by the builders.
*/
public static HttpPipeline buildHttpPipeline(
ClientOptions clientOptions,
HttpLogOptions logOptions,
Configuration configuration,
RetryPolicy retryPolicy,
TokenCredential credential,
ContainerRegistryAudience audience,
List<HttpPipelinePolicy> perCallPolicies,
List<HttpPipelinePolicy> perRetryPolicies,
HttpClient httpClient,
String endpoint,
ContainerRegistryServiceVersion serviceVersion,
ClientLogger logger) {
ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(
new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
policies.add(new CookiePolicy());
policies.add(new AddDatePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
if (credential == null) {
logger.verbose("Credentials are null, enabling anonymous access");
}
ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
credentialPolicies.add(loggingPolicy);
ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
credential,
audience,
endpoint,
serviceVersion,
new HttpPipelineBuilder()
.policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build(),
JacksonAdapter.createDefaultSerializerAdapter());
ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
policies.add(credentialsPolicy);
policies.add(loggingPolicy);
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>();
for (HttpPipelinePolicy policy:policies) {
clonedPolicy.add(policy);
}
return clonedPolicy;
}
/**
* This method computes the digest for the buffer content.
* Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
* @param buffer The buffer containing the image bytes.
* @return SHA-256 digest for the given buffer.
*/
public static String computeDigest(ByteBuffer buffer) {
int startPosition = buffer.position();
try {
MessageDigest md = MessageDigest.getInstance("SHA-256");
md.update(buffer);
byte[] digest = md.digest();
return "sha256:" + byteArrayToHex(digest);
} catch (NoSuchAlgorithmException e) {
}
finally {
buffer.position(startPosition);
}
return null;
}
/**
* Delete operation should be idempotent.
* And so should result in a success in case the service response is 400 : Not found.
* @param responseT The response object.
* @param <T> The encapsulating value.
* @return The transformed response object.
*/
public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
}
return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
}
static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
return Mono.just(new ResponseBase<String, Void>(
responseT.getRequest(),
statusCode,
responseT.getHeaders(),
null,
null));
}
/**
* This method converts the API response codes into well known exceptions.
* @param exception The exception returned by the rest client.
* @return The exception returned by the public methods.
*/
public static Throwable mapException(Throwable exception) {
AcrErrorsException acrException = null;
if (exception instanceof AcrErrorsException) {
acrException = ((AcrErrorsException) exception);
} else if (exception instanceof RuntimeException) {
RuntimeException runtimeException = (RuntimeException) exception;
Throwable throwable = runtimeException.getCause();
if (throwable instanceof AcrErrorsException) {
acrException = (AcrErrorsException) throwable;
}
}
if (acrException == null) {
return exception;
}
final HttpResponse errorHttpResponse = acrException.getResponse();
final int statusCode = errorHttpResponse.getStatusCode();
final String errorDetail = acrException.getMessage();
switch (statusCode) {
case 401:
return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception);
case 404:
return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception);
case 409:
return new ResourceExistsException(errorDetail, acrException.getResponse(), exception);
case 412:
return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception);
default:
return new HttpResponseException(errorDetail, acrException.getResponse(), exception);
}
}
} | class UtilsImpl {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final int HTTP_STATUS_CODE_NOT_FOUND;
private static final int HTTP_STATUS_CODE_ACCEPTED;
private static final String CONTINUATION_LINK_HEADER_NAME;
private static final Pattern CONTINUATION_LINK_PATTERN;
public static final String OCI_MANIFEST_MEDIA_TYPE;
public static final String DOCKER_DIGEST_HEADER_NAME;
public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
HTTP_STATUS_CODE_NOT_FOUND = 404;
HTTP_STATUS_CODE_ACCEPTED = 202;
OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
CONTINUATION_LINK_HEADER_NAME = "Link";
CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");
CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
}
private UtilsImpl() { }
/**
* This method builds the httpPipeline for the builders.
* @param clientOptions The client options
* @param logOptions http log options.
* @param configuration configuration settings.
* @param retryPolicy retry policy
* @param retryOptions retry options
* @param credential credentials.
* @param perCallPolicies per call policies.
* @param perRetryPolicies per retry policies.
* @param httpClient http client
* @param endpoint endpoint to be called
* @param serviceVersion the service api version being targeted by the client.
* @return returns the httpPipeline to be consumed by the builders.
*/
public static HttpPipeline buildHttpPipeline(
ClientOptions clientOptions,
HttpLogOptions logOptions,
Configuration configuration,
RetryPolicy retryPolicy,
RetryOptions retryOptions,
TokenCredential credential,
ContainerRegistryAudience audience,
List<HttpPipelinePolicy> perCallPolicies,
List<HttpPipelinePolicy> perRetryPolicies,
HttpClient httpClient,
String endpoint,
ContainerRegistryServiceVersion serviceVersion,
ClientLogger logger) {
ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(
new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policies.add(new CookiePolicy());
policies.add(new AddDatePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
if (credential == null) {
logger.verbose("Credentials are null, enabling anonymous access");
}
ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
credentialPolicies.add(loggingPolicy);
if (audience == null) {
audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
}
ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
credential,
audience,
endpoint,
serviceVersion,
new HttpPipelineBuilder()
.policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build(),
JacksonAdapter.createDefaultSerializerAdapter());
ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
policies.add(credentialsPolicy);
policies.add(loggingPolicy);
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>();
for (HttpPipelinePolicy policy:policies) {
clonedPolicy.add(policy);
}
return clonedPolicy;
}
/**
* This method computes the digest for the buffer content.
* Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
* @param buffer The buffer containing the image bytes.
* @return SHA-256 digest for the given buffer.
*/
public static String computeDigest(ByteBuffer buffer) {
ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();
try {
MessageDigest md = MessageDigest.getInstance("SHA-256");
md.update(readOnlyBuffer);
byte[] digest = md.digest();
return "sha256:" + byteArrayToHex(digest);
} catch (NoSuchAlgorithmException e) {
}
return null;
}
/**
* Delete operation should be idempotent.
* And so should result in a success in case the service response is 400 : Not found.
* @param responseT The response object.
* @param <T> The encapsulating value.
* @return The transformed response object.
*/
public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
}
return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
}
static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
return Mono.just(new ResponseBase<String, Void>(
responseT.getRequest(),
statusCode,
responseT.getHeaders(),
null,
null));
}
/**
* This method converts the API response codes into well known exceptions.
* @param exception The exception returned by the rest client.
* @return The exception returned by the public methods.
*/
public static Throwable mapException(Throwable exception) {
AcrErrorsException acrException = null;
if (exception instanceof AcrErrorsException) {
acrException = ((AcrErrorsException) exception);
} else if (exception instanceof RuntimeException) {
RuntimeException runtimeException = (RuntimeException) exception;
Throwable throwable = runtimeException.getCause();
if (throwable instanceof AcrErrorsException) {
acrException = (AcrErrorsException) throwable;
}
}
if (acrException == null) {
return exception;
}
final HttpResponse errorHttpResponse = acrException.getResponse();
final int statusCode = errorHttpResponse.getStatusCode();
final String errorDetail = acrException.getMessage();
switch (statusCode) {
case 401:
return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception);
case 404:
return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception);
case 409:
return new ResourceExistsException(errorDetail, acrException.getResponse(), exception);
case 412:
return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception);
default:
return new HttpResponseException(errorDetail, acrException.getResponse(), exception);
}
}
/**
* This method parses the response to get the continuation token used to make the next pagination call.
* The continuation token is returned by the service in the form of a header and not as a nextLink field.
* @param listResponse response that is parsed.
* @param <T> the model type that is being operated on.
* @return paged response with the correct continuation token.
*/
public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
return getPagedResponseWithContinuationToken(listResponse, values -> values);
}
/**
* This method parses the response to get the continuation token used to make the next pagination call.
* The continuation token is returned by the service in the form of a header and not as a nextLink field.
*
* <p>
* Per the Docker v2 HTTP API spec, the Link header is an RFC5988
* compliant rel='next' with URL to next result set, if available.
* See: https:
*
* The URI reference can be obtained from link-value as follows:
* Link = "Link" ":"
* link-value = "<" URI-Reference ">" * (";" link-param )
* See: https:
* </p>
* @param listResponse response that is parsed.
* @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
* @param <T> The model type returned by the rest client.
* @param <R> The model type returned by the public client.
* @return paged response with the correct continuation token.
*/
public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) {
Objects.requireNonNull(mapperFunction);
String continuationLink = null;
HttpHeaders headers = listResponse.getHeaders();
if (headers != null) {
String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) {
Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader);
if (matcher.matches()) {
if (matcher.groupCount() == 1) {
continuationLink = matcher.group(1);
}
}
}
}
List<T> values = mapperFunction.apply(listResponse.getValue());
return new PagedResponseBase<String, T>(
listResponse.getRequest(),
listResponse.getStatusCode(),
listResponse.getHeaders(),
values,
continuationLink,
null
);
}
} |
Did the service remove API version? #Resolved | public TokenServiceImpl(String url, ContainerRegistryServiceVersion apiVersion, HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
if (serializerAdapter == null) {
serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
this.authenticationsImpl = new AuthenticationsImpl(url, pipeline, serializerAdapter);
} | public TokenServiceImpl(String url, ContainerRegistryServiceVersion apiVersion, HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
if (serializerAdapter == null) {
serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
if (apiVersion == null) {
apiVersion = ContainerRegistryServiceVersion.getLatest();
}
this.authenticationsImpl = new AuthenticationsImpl(url, apiVersion.getVersion(), pipeline, serializerAdapter);
} | class TokenServiceImpl {
private final AuthenticationsImpl authenticationsImpl;
private static final String REFRESHTOKEN_GRANTTYPE = "refresh_token";
private static final String ACCESSTOKEN_GRANTTYPE = "access_token";
/**
* Creates an instance of the token service impl class.TokenServiceImpl.java
* @param url the service endpoint.
* @param apiVersion the api-version of the service being targeted.
* @param pipeline the pipeline to use to make the call.
* @param serializerAdapter the serializer adapter for the rest client.
*
*/
/**
* Gets the ACR access token.
* @param acrRefreshToken Given the ACRs refresh token.
* @param scope - Token scope.
* @param serviceName The name of the service.
*
*/
public Mono<AccessToken> getAcrAccessTokenAsync(String acrRefreshToken, String scope, String serviceName, String grantType) {
return this.authenticationsImpl.exchangeAcrRefreshTokenForAcrAccessTokenAsync(serviceName, scope, grantType, acrRefreshToken)
.map(token -> {
String accessToken = token.getAccessToken();
OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(accessToken);
return new AccessToken(accessToken, expirationTime);
});
}
/**
* Gets an ACR refresh token.
* @param aadAccessToken Given the ACR access token.
* @param serviceName Given the ACR service.
*
*/
public Mono<AccessToken> getAcrRefreshTokenAsync(String aadAccessToken, String serviceName) {
return this.authenticationsImpl.exchangeAadAccessTokenForAcrRefreshTokenAsync(
serviceName,
aadAccessToken).map(token -> {
String refreshToken = token.getRefreshToken();
OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(refreshToken);
return new AccessToken(refreshToken, expirationTime);
});
}
} | class TokenServiceImpl {
private final AuthenticationsImpl authenticationsImpl;
private static final String REFRESHTOKEN_GRANTTYPE = "refresh_token";
private static final String ACCESSTOKEN_GRANTTYPE = "access_token";
/**
* Creates an instance of the token service impl class.TokenServiceImpl.java
*
* @param url the service endpoint.
* @param apiVersion the api-version of the service being targeted.
* @param pipeline the pipeline to use to make the call.
* @param serializerAdapter the serializer adapter for the rest client.
*/
/**
* Gets the ACR access token.
*
* @param acrRefreshToken Given the ACRs refresh token.
* @param scope - Token scope.
* @param serviceName The name of the service.
*/
public Mono<AccessToken> getAcrAccessTokenAsync(String acrRefreshToken, String scope, String serviceName, TokenGrantType grantType) {
return withContext(context -> this.authenticationsImpl.exchangeAcrRefreshTokenForAcrAccessTokenWithResponseAsync(serviceName, scope, acrRefreshToken, grantType, context)
.flatMap(response -> {
AcrAccessToken token = response.getValue();
if (token != null) {
String accessToken = token.getAccessToken();
OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(accessToken);
return Mono.just(new AccessToken(accessToken, expirationTime));
}
return Mono.empty();
}));
}
/**
* Gets an ACR refresh token.
*
* @param aadAccessToken Given the ACR access token.
* @param serviceName Given the ACR service.
*/
public Mono<AccessToken> getAcrRefreshTokenAsync(String aadAccessToken, String serviceName) {
return withContext(context -> this.authenticationsImpl.exchangeAadAccessTokenForAcrRefreshTokenWithResponseAsync(PostContentSchemaGrantType.ACCESS_TOKEN, serviceName, null, null, aadAccessToken, context).flatMap(response -> {
AcrRefreshToken token = response.getValue();
if (token != null) {
String refreshToken = token.getRefreshToken();
OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(refreshToken);
return Mono.just(new AccessToken(refreshToken, expirationTime));
}
return Mono.empty();
}));
}
} | |
This should be returned as a `Mono.error`, same for other locations in this file that throw #Resolved | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
throw logger.logExceptionAsError(new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new RuntimeException(exception.getMessage()));
}
} | throw logger.logExceptionAsError(new NullPointerException("'manifest' can't be null.")); | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final HttpPipeline httpPipeline;
private final String endpoint;
private final String apiVersion;
private final String repositoryName;
private final String registryLoginServer;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.httpPipeline = httpPipeline;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
this.apiVersion = version;
try {
URL endpointUrl = new URL(endpoint);
this.registryLoginServer = endpointUrl.getHost();
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
throw logger.logExceptionAsError(new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
throw logger.logExceptionAsError(new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
throw logger.logExceptionAsError(new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
dig,
playableFlux.flush(),
playableFlux.getSize(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context))
.flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
throw logger.logExceptionAsError(new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
throw logger.logExceptionAsError(new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
AtomicReference<String> digest = new AtomicReference<>();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> {
digest.set(dig);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context);
})
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
throw logger.logExceptionAsError(new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
throw logger.logExceptionAsError(new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
Response<DownloadBlobResult> blobResult = new ResponseBase<>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
null,
new DownloadBlobResult()
.setContent(streamResponse.getValue())
.setDigest(resDigest));
return Mono.just(blobResult);
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
throw logger.logExceptionAsError(new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
static final class BufferedFlux {
int size;
ByteBuffer byteBuffer;
int getSize() {
return this.size;
}
String getDigest() {
return UtilsImpl.computeDigest(byteBuffer);
}
Flux<Void> write(ByteBuffer buffer) {
size += buffer.remaining();
byteBuffer = buffer;
return Flux.empty();
}
Flux<ByteBuffer> flush() {
return Flux.just(byteBuffer);
}
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    // Delegate to the Response-returning overload and discard the response envelope.
    Mono<Response<Void>> withResponse = deleteBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    // Capture the reactive Context and forward to the Context-aware overload.
    return withContext(ctx -> deleteBlobWithResponse(digest, ctx));
}
// Context-aware delete; 404 responses are normalized to success by deleteResponseToSuccess.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    // Validate eagerly so the error surfaces as a Mono signal, not a thrown NPE.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    Mono<Response<Void>> deletion = blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess);
    return deletion.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    // Delegate to the Response-returning overload and discard the response envelope.
    Mono<Response<Void>> withResponse = deleteManifestWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    // Capture the reactive Context and forward to the Context-aware overload.
    return withContext(ctx -> deleteManifestWithResponse(digest, ctx));
}
// Context-aware delete; 404 responses are normalized to success by deleteResponseToSuccess.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Fail fast with the NullPointerException promised by the public javadoc,
    // mirroring deleteBlobWithResponse. Previously a null digest fell through
    // to the service call instead of surfacing as a Mono error.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
That internally creates a new buffer. Considering this can be a huge byte array, I would rather be memory conscious. Are there any downsides to moving the position? | public static String byteArrayToHex(byte[] a) {
StringBuilder sb = new StringBuilder(a.length * 2);
for (byte b: a) {
sb.append(String.format("%02x", b));
}
return sb.toString();
} | for (byte b: a) { | public static String byteArrayToHex(byte[] a) {
StringBuilder sb = new StringBuilder(a.length * 2);
for (byte b: a) {
sb.append(String.format("%02x", b));
}
return sb.toString();
} | class UtilsImpl {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final int HTTP_STATUS_CODE_NOT_FOUND = 404;
private static final int HTTP_STATUS_CODE_ACCEPTED = 202;
public static final String OCI_MANIFEST_MEDIA_TYPE;
public static final String DOCKER_DIGEST_HEADER_NAME;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
}
/**
* This method builds the httpPipeline for the builders.
* @param clientOptions The client options
* @param logOptions http log options.
* @param configuration configuration settings.
* @param retryPolicy retry policy
* @param credential credentials.
* @param perCallPolicies per call policies.
* @param perRetryPolicies per retry policies.
* @param httpClient http client
* @param endpoint endpoint to be called
* @param serviceVersion the service api version being targeted by the client.
* @return returns the httpPipeline to be consumed by the builders.
*/
public static HttpPipeline buildHttpPipeline(
    ClientOptions clientOptions,
    HttpLogOptions logOptions,
    Configuration configuration,
    RetryPolicy retryPolicy,
    TokenCredential credential,
    ContainerRegistryAudience audience,
    List<HttpPipelinePolicy> perCallPolicies,
    List<HttpPipelinePolicy> perRetryPolicies,
    HttpClient httpClient,
    String endpoint,
    ContainerRegistryServiceVersion serviceVersion,
    ClientLogger logger) {
    // Policy order matters: per-call policies run once per API call (before the retry
    // policy), per-retry policies run on every attempt (after it).
    ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(
        new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration));
    policies.add(new RequestIdPolicy());
    policies.addAll(perCallPolicies);
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    // Fall back to the default retry policy when the caller supplied none.
    policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
    policies.add(new CookiePolicy());
    policies.add(new AddDatePolicy());
    policies.addAll(perRetryPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
    if (credential == null) {
        // A null credential is valid: the registry is accessed anonymously.
        logger.verbose("Credentials are null, enabling anonymous access");
    }
    // Snapshot of the policies so far (without the ACR credentials policy) used to build
    // the inner pipeline the token service uses to acquire registry tokens.
    ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
    credentialPolicies.add(loggingPolicy);
    ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
        credential,
        audience,
        endpoint,
        serviceVersion,
        new HttpPipelineBuilder()
            .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build(),
        JacksonAdapter.createDefaultSerializerAdapter());
    // The credentials policy attaches registry tokens to outgoing data-plane requests.
    ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
    policies.add(credentialsPolicy);
    policies.add(loggingPolicy);
    HttpPipeline httpPipeline =
        new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    return httpPipeline;
}
// Shallow copy of the policy list. The ArrayList copy constructor replaces the
// manual add-loop and presizes the backing array in a single step; the policy
// instances themselves are intentionally shared, matching the original behavior.
private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
    return new ArrayList<>(policies);
}
/**
 * This method computes the digest for the buffer content.
 * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
 * The buffer's read position is restored before returning, so the caller can still consume it.
 * @param buffer The buffer containing the image bytes.
 * @return SHA-256 digest for the given buffer, prefixed with {@code "sha256:"}.
 */
public static String computeDigest(ByteBuffer buffer) {
    // Remember where the caller's view starts; MessageDigest.update advances the position.
    int startPosition = buffer.position();
    try {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(buffer);
        return "sha256:" + byteArrayToHex(md.digest());
    } catch (NoSuchAlgorithmException e) {
        // Every Java platform is required to provide SHA-256, so this branch is
        // effectively unreachable; fail loudly instead of silently returning null
        // (the previous empty catch swallowed the error).
        throw new IllegalStateException("SHA-256 MessageDigest is not available.", e);
    } finally {
        // Restore the caller-visible position regardless of outcome.
        buffer.position(startPosition);
    }
}
/**
 * Delete operation should be idempotent.
 * And so should result in a success in case the service response is 404 : Not found.
 * @param responseT The response object.
 * @param <T> The encapsulating value.
 * @return The transformed response object.
 */
public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
    if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
        // Not a 404: pass the service's status code through unchanged.
        return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
    }
    // 404 on delete means "already gone" — report it as 202 Accepted.
    return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
}
// Repackages a service response as a body-less Response<Void> with the given status
// code, preserving the original request and headers.
static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
    return Mono.just(new ResponseBase<String, Void>(
        responseT.getRequest(),
        statusCode,
        responseT.getHeaders(),
        null,
        null));
}
/**
 * This method converts the API response codes into well known exceptions.
 * @param exception The exception returned by the rest client.
 * @return The exception returned by the public methods.
 */
public static Throwable mapException(Throwable exception) {
    // Locate the AcrErrorsException: either the throwable itself or, for runtime
    // exceptions, its direct cause.
    AcrErrorsException acrException = null;
    if (exception instanceof AcrErrorsException) {
        acrException = (AcrErrorsException) exception;
    } else if (exception instanceof RuntimeException) {
        Throwable cause = ((RuntimeException) exception).getCause();
        if (cause instanceof AcrErrorsException) {
            acrException = (AcrErrorsException) cause;
        }
    }
    // Nothing recognizable — hand the original throwable back untouched.
    if (acrException == null) {
        return exception;
    }
    HttpResponse errorResponse = acrException.getResponse();
    String detail = acrException.getMessage();
    // Translate well-known status codes into azure-core exception types, keeping the
    // original throwable as the cause.
    switch (errorResponse.getStatusCode()) {
        case 401:
            return new ClientAuthenticationException(detail, errorResponse, exception);
        case 404:
            return new ResourceNotFoundException(detail, errorResponse, exception);
        case 409:
            return new ResourceExistsException(detail, errorResponse, exception);
        case 412:
            return new ResourceModifiedException(detail, errorResponse, exception);
        default:
            return new HttpResponseException(detail, errorResponse, exception);
    }
}
} | class UtilsImpl {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final int HTTP_STATUS_CODE_NOT_FOUND;
private static final int HTTP_STATUS_CODE_ACCEPTED;
private static final String CONTINUATION_LINK_HEADER_NAME;
private static final Pattern CONTINUATION_LINK_PATTERN;
public static final String OCI_MANIFEST_MEDIA_TYPE;
public static final String DOCKER_DIGEST_HEADER_NAME;
public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
HTTP_STATUS_CODE_NOT_FOUND = 404;
HTTP_STATUS_CODE_ACCEPTED = 202;
OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
CONTINUATION_LINK_HEADER_NAME = "Link";
CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");
CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
}
private UtilsImpl() { }
/**
* This method builds the httpPipeline for the builders.
* @param clientOptions The client options
* @param logOptions http log options.
* @param configuration configuration settings.
* @param retryPolicy retry policy
* @param retryOptions retry options
* @param credential credentials.
* @param perCallPolicies per call policies.
* @param perRetryPolicies per retry policies.
* @param httpClient http client
* @param endpoint endpoint to be called
* @param serviceVersion the service api version being targeted by the client.
* @return returns the httpPipeline to be consumed by the builders.
*/
public static HttpPipeline buildHttpPipeline(
    ClientOptions clientOptions,
    HttpLogOptions logOptions,
    Configuration configuration,
    RetryPolicy retryPolicy,
    RetryOptions retryOptions,
    TokenCredential credential,
    ContainerRegistryAudience audience,
    List<HttpPipelinePolicy> perCallPolicies,
    List<HttpPipelinePolicy> perRetryPolicies,
    HttpClient httpClient,
    String endpoint,
    ContainerRegistryServiceVersion serviceVersion,
    ClientLogger logger) {
    // Policy order matters: per-call policies run once per API call (before the retry
    // policy), per-retry policies run on every attempt (after it).
    ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(
        new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration));
    policies.add(new RequestIdPolicy());
    policies.addAll(perCallPolicies);
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    // Rejects the caller supplying both retryPolicy and retryOptions; falls back to a
    // default when neither is set.
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
    policies.add(new CookiePolicy());
    policies.add(new AddDatePolicy());
    policies.addAll(perRetryPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
    if (credential == null) {
        // A null credential is valid: the registry is accessed anonymously.
        logger.verbose("Credentials are null, enabling anonymous access");
    }
    // Snapshot of the policies so far (without the ACR credentials policy) used to build
    // the inner pipeline the token service uses to acquire registry tokens.
    ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
    credentialPolicies.add(loggingPolicy);
    if (audience == null) {
        // Default to the public-cloud ARM audience when the caller did not specify one.
        audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
    }
    ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
        credential,
        audience,
        endpoint,
        serviceVersion,
        new HttpPipelineBuilder()
            .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build(),
        JacksonAdapter.createDefaultSerializerAdapter());
    // The credentials policy attaches registry tokens to outgoing data-plane requests.
    ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
    policies.add(credentialsPolicy);
    policies.add(loggingPolicy);
    HttpPipeline httpPipeline =
        new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    return httpPipeline;
}
// Shallow copy of the policy list. The ArrayList copy constructor replaces the
// manual add-loop and presizes the backing array in a single step; the policy
// instances themselves are intentionally shared, matching the original behavior.
private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
    return new ArrayList<>(policies);
}
/**
 * This method computes the digest for the buffer content.
 * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
 * The caller's buffer is left untouched: hashing happens on a read-only duplicate.
 * @param buffer The buffer containing the image bytes.
 * @return SHA-256 digest for the given buffer, prefixed with {@code "sha256:"}.
 */
public static String computeDigest(ByteBuffer buffer) {
    // Duplicate shares the backing data but has independent position/limit, so the
    // caller's view of 'buffer' is never mutated.
    ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();
    try {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(readOnlyBuffer);
        return "sha256:" + byteArrayToHex(md.digest());
    } catch (NoSuchAlgorithmException e) {
        // Every Java platform is required to provide SHA-256, so this branch is
        // effectively unreachable; fail loudly instead of silently returning null
        // (the previous empty catch swallowed the error).
        throw new IllegalStateException("SHA-256 MessageDigest is not available.", e);
    }
}
/**
 * Delete operation should be idempotent.
 * And so should result in a success in case the service response is 404 : Not found.
 * @param responseT The response object.
 * @param <T> The encapsulating value.
 * @return The transformed response object.
 */
public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
    if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
        // Not a 404: pass the service's status code through unchanged.
        return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
    }
    // 404 on delete means "already gone" — report it as 202 Accepted.
    return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
}
// Repackages a service response as a body-less Response<Void> with the given status
// code, preserving the original request and headers.
static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
    return Mono.just(new ResponseBase<String, Void>(
        responseT.getRequest(),
        statusCode,
        responseT.getHeaders(),
        null,
        null));
}
/**
 * This method converts the API response codes into well known exceptions.
 * @param exception The exception returned by the rest client.
 * @return The exception returned by the public methods.
 */
public static Throwable mapException(Throwable exception) {
    // Locate the AcrErrorsException: either the throwable itself or, for runtime
    // exceptions, its direct cause.
    AcrErrorsException acrException = null;
    if (exception instanceof AcrErrorsException) {
        acrException = (AcrErrorsException) exception;
    } else if (exception instanceof RuntimeException) {
        Throwable cause = ((RuntimeException) exception).getCause();
        if (cause instanceof AcrErrorsException) {
            acrException = (AcrErrorsException) cause;
        }
    }
    // Nothing recognizable — hand the original throwable back untouched.
    if (acrException == null) {
        return exception;
    }
    HttpResponse errorResponse = acrException.getResponse();
    String detail = acrException.getMessage();
    // Translate well-known status codes into azure-core exception types, keeping the
    // original throwable as the cause.
    switch (errorResponse.getStatusCode()) {
        case 401:
            return new ClientAuthenticationException(detail, errorResponse, exception);
        case 404:
            return new ResourceNotFoundException(detail, errorResponse, exception);
        case 409:
            return new ResourceExistsException(detail, errorResponse, exception);
        case 412:
            return new ResourceModifiedException(detail, errorResponse, exception);
        default:
            return new HttpResponseException(detail, errorResponse, exception);
    }
}
/**
 * This method parses the response to get the continuation token used to make the next pagination call.
 * The continuation token is returned by the service in the form of a header and not as a nextLink field.
 * @param listResponse response that is parsed.
 * @param <T> the model type that is being operated on.
 * @return paged response with the correct continuation token.
 */
public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
    // No conversion needed here: the rest-model type is already the public type.
    return getPagedResponseWithContinuationToken(listResponse, Function.identity());
}
/**
 * This method parses the response to get the continuation token used to make the next pagination call.
 * The continuation token is returned by the service in the form of a header and not as a nextLink field.
 *
 * <p>
 * Per the Docker v2 HTTP API spec, the Link header is an RFC5988
 * compliant rel='next' with the URL to the next result set, if available:
 * Link = "Link" ":" link-value, where link-value = "&lt;" URI-Reference "&gt;" * (";" link-param).
 * The URI reference between the angle brackets is extracted as the continuation token.
 * </p>
 * @param listResponse response that is parsed.
 * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
 * @param <T> The model type returned by the rest client.
 * @param <R> The model type returned by the public client.
 * @return paged response with the correct continuation token.
 */
public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) {
    Objects.requireNonNull(mapperFunction);
    String continuationLink = null;
    HttpHeaders headers = listResponse.getHeaders();
    if (headers != null) {
        String linkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
        if (!CoreUtils.isNullOrEmpty(linkHeader)) {
            // Pull the URI out of "<uri>;rel=next"-style values.
            Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(linkHeader);
            if (matcher.matches() && matcher.groupCount() == 1) {
                continuationLink = matcher.group(1);
            }
        }
    }
    List<T> values = mapperFunction.apply(listResponse.getValue());
    return new PagedResponseBase<String, T>(
        listResponse.getRequest(),
        listResponse.getStatusCode(),
        listResponse.getHeaders(),
        values,
        continuationLink,
        null
    );
}
} |
Consider using `UncheckedIOException` when wrapping `IOException`. #Resolved | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new RuntimeException(exception.getMessage()));
}
} | return monoError(logger, new RuntimeException(exception.getMessage())); | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
/**
 * Creates a blob client bound to one repository of the registry at {@code endpoint},
 * building the generated data-plane clients over the supplied pipeline.
 *
 * @param repositoryName repository all operations of this client target.
 * @param httpPipeline fully configured pipeline (auth, retries, logging).
 * @param endpoint registry endpoint including the authority.
 * @param version service API version passed to the generated client.
 */
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
    this.repositoryName = repositoryName;
    this.endpoint = endpoint;
    this.registryImplClient = new AzureContainerRegistryImplBuilder()
        .url(endpoint)
        .pipeline(httpPipeline)
        .apiVersion(version)
        .buildClient();
    // Generated sub-clients for the blob and registry (manifest) operation groups.
    this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
    this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Gets the registry repository this client operates on.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}
/**
 * Gets the complete registry endpoint, including the authority.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
    // Surface the documented NullPointerException as a Mono error signal.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Delegate to the Response-returning overload and unwrap the result value.
    return withContext(ctx -> uploadManifestWithResponse(data.toFluxByteBuffer(), ctx))
        .flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    // Surface the documented NullPointerException as a Mono error signal.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Capture the reactive Context and forward to the Context-aware overload.
    return withContext(ctx -> uploadManifestWithResponse(data.toFluxByteBuffer(), ctx));
}
// Context-aware implementation backing the public uploadManifest overloads.
// Buffers the manifest bytes, computes their digest, then creates the manifest under
// that digest in a single service call.
// NOTE(review): BufferedFlux appears to accumulate the whole payload in memory and
// expose its digest/size — confirm against the BufferedFlux implementation.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
    if (data == null) {
        // Surface the documented NullPointerException as a Mono error signal.
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    BufferedFlux playableFlux = new BufferedFlux();
    // Drain the input into the buffer first; the digest is only valid once all bytes
    // have been written, hence the Mono.defer.
    return data.concatMap(playableFlux::write)
        .then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
        .flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
            repositoryName,
            dig,
            playableFlux.flush(),
            playableFlux.getSize(),
            UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
            context))
        .flatMap(response -> {
            // Repackage the generated-client response into the public result type,
            // exposing the digest the service reported for the created manifest.
            Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
            return Mono.just(res);
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    // Surface the documented NullPointerException as a Mono error signal.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Delegate to the Response-returning overload and unwrap the result value.
    return withContext(ctx -> uploadBlobWithResponse(data.toFluxByteBuffer(), ctx))
        .flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context));
}
// Context-aware implementation backing the public uploadBlob overloads.
// Buffers the whole blob, then performs the three-step upload protocol:
// start upload -> upload (single) chunk -> complete upload with the digest.
// NOTE(review): the entire blob is held in memory by BufferedFlux; chunked upload of
// large layers is not supported here — confirm this matches the documented limitation.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) {
    if (data == null) {
        // Surface the documented NullPointerException as a Mono error signal.
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    BufferedFlux playableFlux = new BufferedFlux();
    // Holds the digest computed after buffering so the completeUpload step can use it.
    AtomicReference<String> digest = new AtomicReference<>();
    return data.concatMap(playableFlux::write)
        .then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
        .flatMap(dig -> {
            digest.set(dig);
            return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context);
        })
        // Each step's Location header points at the next upload URL; trimNextLink strips
        // the leading slash the generated client cannot handle.
        .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context))
        .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
        .flatMap(completeUploadResponse -> {
            // Repackage into the public result type with the service-reported digest.
            Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
                completeUploadResponse.getStatusCode(),
                completeUploadResponse.getHeaders(),
                new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
                completeUploadResponse.getDeserializedHeaders());
            return Mono.just(res);
        }).onErrorMap(UtilsImpl::mapException);
}
// The service returns upload locations as absolute paths ("/v2/..."); the generated
// client expects them without the leading slash.
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/") ? locationHeader.substring(1) : locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    // Delegate to the Response-returning overload and unwrap the manifest.
    Mono<Response<OciManifest>> withResponse = downloadManifestWithResponse(tagOrDigest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    // Capture the reactive Context and forward to the Context-aware overload.
    return withContext(ctx -> downloadManifestWithResponse(tagOrDigest, ctx));
}
// Context-aware implementation backing the public downloadManifest overloads.
// Fetches the manifest wrapper and converts it to the public OciManifest model after a
// sanity check that the response matches what was requested.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
    if (tagOrDigest == null) {
        // Surface the documented NullPointerException as a Mono error signal.
        return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
    }
    return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .flatMap(response -> {
            // Digest the service reports for the returned manifest.
            String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            ManifestWrapper wrapper = response.getValue();
            // NOTE(review): this only string-compares the header digest (or the wrapper's
            // tag) against the request — the digest is not recomputed from the content;
            // confirm this is the intended level of verification.
            if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
                // Copy the wire model into the public OCI manifest model.
                OciManifest ociManifest = new OciManifest()
                    .setAnnotations(wrapper.getAnnotations())
                    .setConfig(wrapper.getConfig())
                    .setLayers(wrapper.getLayers())
                    .setSchemaVersion(wrapper.getSchemaVersion());
                Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
                    response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    ociManifest,
                    null);
                return Mono.just(res);
            } else {
                return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
            }
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Delegate to the Response-returning overload and unwrap the payload.
    Mono<Response<DownloadBlobResult>> withResponse = downloadBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    // Capture the reactive Context and forward to the Context-aware overload.
    return withContext(ctx -> downloadBlobWithResponse(digest, ctx));
}
// Context-aware implementation backing the public downloadBlob overloads.
// Streams the blob for 'digest' and wraps it, together with the service-reported
// digest header, into the public DownloadBlobResult model.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        // Surface the documented NullPointerException as a Mono error signal.
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(streamResponse -> {
            // Digest reported by the service; not recomputed from the content.
            String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            // NOTE(review): the result object is passed in the trailing
            // (deserialized-headers) argument slot while the value slot is null —
            // confirm against the ResponseBase(request, status, headers, value,
            // deserializedHeaders) constructor ordering.
            Response<DownloadBlobResult> blobResult = new ResponseBase<>(
                streamResponse.getRequest(),
                streamResponse.getStatusCode(),
                streamResponse.getHeaders(),
                null,
                new DownloadBlobResult()
                    .setContent(streamResponse.getValue())
                    .setDigest(resDigest));
            return Mono.just(blobResult);
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    // Delegate to the Response-returning overload and discard the response envelope.
    Mono<Response<Void>> withResponse = deleteBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the manifest associated with the given digest.
 * The client currently only supports OCI manifests.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the manifest associated with the given digest, returning the raw REST response.
 * The client currently only supports OCI manifests.
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // FIX: the javadoc promises NullPointerException for a null digest and the sibling
    // deleteBlobWithResponse validates; this overload previously passed null straight through
    // to the generated client.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Accumulates the byte count of buffers passed to {@link #write(ByteBuffer)} and retains the
 * most recently written buffer for {@link #flush()} and digest computation.
 *
 * NOTE(review): write() adds every buffer's remaining length to {@code size} but keeps only the
 * LAST buffer, so getDigest()/flush() reflect just the final chunk while getSize() reflects all
 * chunks. This is consistent only if callers ever write a single buffer — confirm before reusing
 * this with multi-chunk input.
 */
static final class BufferedFlux {
// Total number of bytes seen across all write() calls.
int size;
// Most recently written buffer; earlier buffers are discarded.
ByteBuffer byteBuffer;
int getSize() {
return this.size;
}
// "sha256:<hex>" digest of the retained (last) buffer only.
String getDigest() {
return UtilsImpl.computeDigest(byteBuffer);
}
// Records the buffer and grows the running byte count; completes immediately.
Flux<Void> write(ByteBuffer buffer) {
size += buffer.remaining();
byteBuffer = buffer;
return Flux.empty();
}
// Emits the retained buffer.
Flux<ByteBuffer> flush() {
return Flux.just(byteBuffer);
}
}
} | class ContainerRegistryBlobAsyncClient {
// Generated service client plus the two operation groups (blobs, registries) it exposes.
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
// Registry endpoint (including authority) and the repository that all operations target.
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Gets the name of the repository this client operates on.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}
/**
 * Gets the complete registry endpoint.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Uploads a manifest to the repository.
 * The client currently only supports uploading OciManifests to the repository, and this
 * operation assumes the data provided is a valid OCI manifest.
 * <p>
 * The data is read into memory and the upload is performed as a single operation.
 *
 * @param data The manifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
// FIX: removed a stale javadoc block and a duplicated @ServiceMethod annotation left over from
// a deleted uploadManifest(OciManifest) overload — @ServiceMethod is not repeatable, so the
// duplicate annotation does not compile.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
 * Uploads a manifest to the repository and returns the raw REST response.
 * The client currently only supports uploading OciManifests, and this operation assumes the
 * data provided is a valid OCI manifest.
 * <p>
 * The data is read into memory and the upload is performed as a single operation.
 *
 * @param data The manifest that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadManifestWithResponse(data.toByteBuffer(), context));
}
/**
 * Creates the manifest on the service, addressed by its client-computed digest.
 *
 * @param data manifest bytes; must not be null.
 * @param context call context propagated to the generated client.
 * @return REST response whose value carries the digest acknowledged by the service.
 */
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // The digest is computed client-side and used as the manifest reference.
    String manifestDigest = UtilsImpl.computeDigest(data);
    return this.registriesImpl
        .createManifestWithResponseAsync(repositoryName, manifestDigest, Flux.just(data),
            data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .map(response -> {
            Response<UploadManifestResult> mapped = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
            return mapped;
        })
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Uploads a blob to the repository.
 * The whole blob\layer is buffered in memory and sent as a single unit; splitting the layer
 * into multiple chunks is not supported yet.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadBlobWithResponse(data.toByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository and returns the raw REST response.
 * The whole blob\layer is buffered in memory and sent as a single unit; splitting the layer
 * into multiple chunks is not supported yet.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // FIX: validate like uploadBlob/uploadManifestWithResponse already do; previously a null
    // argument surfaced as a raw NullPointerException from data.toByteBuffer() instead of the
    // documented error signal.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
/**
 * Performs the three-step blob upload: start an upload session, push the whole payload as one
 * chunk, then complete the upload with the client-computed digest. Each step's "Location"
 * header supplies the URL for the next step (see trimNextLink).
 *
 * @param data blob bytes; must not be null.
 * @param context call context propagated to the generated client.
 * @return REST response whose value carries the digest acknowledged by the service.
 */
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest computed up front; the service verifies it on completeUpload.
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Strips a leading '/' from a "Location" next-link header value so it can be appended to the
 * client's base URL.
 *
 * @param locationHeader raw header value (assumed non-null in the upload flow — TODO confirm).
 * @return the header value without a leading slash.
 */
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/")
        ? locationHeader.substring(1)
        : locationHeader;
}
/**
 * Download the manifest associated with the given tag or digest.
 * The client currently only supports downloading OCI manifests.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
 * Download the manifest associated with the given tag or digest, with the raw REST response.
 * The client currently only supports downloading OCI manifests.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    return withContext(context -> downloadManifestWithResponse(tagOrDigest, context));
}
/**
 * Fetches the manifest for the given tag or digest and converts the service wrapper into an
 * OciManifest. The response is accepted only when the identifier the caller supplied matches
 * either the digest echoed in the Docker-Content-Digest header or the tag reported in the body;
 * otherwise a ServiceResponseException is raised.
 *
 * @param tagOrDigest tag or digest of the manifest; must not be null.
 * @param context call context propagated to the generated client.
 * @return REST response whose value is the converted OCI manifest.
 */
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
// Accept only if the caller's identifier matches the returned digest or the returned tag.
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Download the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Unwrap the payload from the Response-returning variant.
    return downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Download the blob\layer associated with the given digest, along with the raw REST response.
 *
 * @param digest The digest for the given image layer.
 * @return A REST response wrapping the blob associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
/**
 * Fetches the blob for {@code digest}, buffers the streamed content into BinaryData and wraps
 * it in a REST response alongside the digest echoed by the service.
 *
 * @param digest The digest for the given image layer; must not be null.
 * @param context The call context propagated to the generated client.
 * @return The REST response whose value is the downloaded blob content.
 */
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(streamResponse -> {
            String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            // Buffer the streamed body before handing it back to the caller.
            Mono<Response<DownloadBlobResult>> buffered = BinaryData.fromFlux(streamResponse.getValue())
                .map(binaryData -> new ResponseBase<HttpHeaders, DownloadBlobResult>(
                    streamResponse.getRequest(),
                    streamResponse.getStatusCode(),
                    streamResponse.getHeaders(),
                    new DownloadBlobResult(resDigest, binaryData),
                    null));
            return buffered;
        })
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the blob associated with the given digest, returning the raw REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    // Deletion is idempotent: a 404 from the service is normalized to success downstream.
    return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the manifest associated with the given digest.
 * The client currently only supports OCI manifests.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the manifest associated with the given digest, returning the raw REST response.
 * The client currently only supports OCI manifests.
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // FIX: the javadoc promises NullPointerException for a null digest and the sibling
    // deleteBlobWithResponse validates; this overload previously passed null straight through.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
In this swagger version yes. They are working on another version where they will be adding this back. | public TokenServiceImpl(String url, ContainerRegistryServiceVersion apiVersion, HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
if (serializerAdapter == null) {
serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
this.authenticationsImpl = new AuthenticationsImpl(url, pipeline, serializerAdapter);
} | public TokenServiceImpl(String url, ContainerRegistryServiceVersion apiVersion, HttpPipeline pipeline, SerializerAdapter serializerAdapter) {
if (serializerAdapter == null) {
serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
if (apiVersion == null) {
apiVersion = ContainerRegistryServiceVersion.getLatest();
}
this.authenticationsImpl = new AuthenticationsImpl(url, apiVersion.getVersion(), pipeline, serializerAdapter);
} | class TokenServiceImpl {
// Generated authentication operations used for the token exchanges below.
private final AuthenticationsImpl authenticationsImpl;
// OAuth2 grant-type identifiers for the two exchange flows.
private static final String REFRESHTOKEN_GRANTTYPE = "refresh_token";
private static final String ACCESSTOKEN_GRANTTYPE = "access_token";
/*
 * NOTE(review): this comment documents the constructor (declared elsewhere in this class), not
 * the method below — kept as a plain block comment so javadoc does not misattach it.
 *
 * Creates an instance of the token service impl class.
 * @param url the service endpoint.
 * @param apiVersion the api-version of the service being targeted.
 * @param pipeline the pipeline to use to make the call.
 * @param serializerAdapter the serializer adapter for the rest client.
 */
/**
 * Gets the ACR access token by exchanging an ACR refresh token.
 *
 * @param acrRefreshToken the ACR refresh token to exchange.
 * @param scope token scope.
 * @param serviceName the name of the service.
 * @param grantType the OAuth2 grant type to use for the exchange.
 * @return the ACR access token, with its expiry decoded from the JWT payload.
 */
public Mono<AccessToken> getAcrAccessTokenAsync(String acrRefreshToken, String scope, String serviceName, String grantType) {
return this.authenticationsImpl.exchangeAcrRefreshTokenForAcrAccessTokenAsync(serviceName, scope, grantType, acrRefreshToken)
.map(token -> {
String accessToken = token.getAccessToken();
// Expiration is decoded from the JWT itself.
OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(accessToken);
return new AccessToken(accessToken, expirationTime);
});
}
/**
 * Exchanges an AAD access token for an ACR refresh token.
 *
 * @param aadAccessToken The AAD access token to exchange.
 * @param serviceName The ACR service the token is scoped to.
 * @return The ACR refresh token, with its expiry decoded from the JWT payload.
 */
public Mono<AccessToken> getAcrRefreshTokenAsync(String aadAccessToken, String serviceName) {
    return this.authenticationsImpl
        .exchangeAadAccessTokenForAcrRefreshTokenAsync(serviceName, aadAccessToken)
        .map(token -> {
            String jwt = token.getRefreshToken();
            // Expiration is decoded from the JWT itself.
            return new AccessToken(jwt, JsonWebToken.retrieveExpiration(jwt));
        });
}
} | class TokenServiceImpl {
// Generated authentication operations used for the token exchanges below.
private final AuthenticationsImpl authenticationsImpl;
// OAuth2 grant-type identifiers for the two exchange flows.
private static final String REFRESHTOKEN_GRANTTYPE = "refresh_token";
private static final String ACCESSTOKEN_GRANTTYPE = "access_token";
/*
 * NOTE(review): this comment documents the constructor (declared elsewhere in this class), not
 * the method below — kept as a plain block comment so javadoc does not misattach it.
 *
 * Creates an instance of the token service impl class.
 * @param url the service endpoint.
 * @param apiVersion the api-version of the service being targeted.
 * @param pipeline the pipeline to use to make the call.
 * @param serializerAdapter the serializer adapter for the rest client.
 */
/**
 * Gets the ACR access token by exchanging an ACR refresh token.
 *
 * @param acrRefreshToken the ACR refresh token to exchange.
 * @param scope token scope.
 * @param serviceName the name of the service.
 * @param grantType the grant type to use for the exchange.
 * @return the ACR access token with its expiry decoded from the JWT payload; empty when the
 *     service returns no body.
 */
public Mono<AccessToken> getAcrAccessTokenAsync(String acrRefreshToken, String scope, String serviceName, TokenGrantType grantType) {
return withContext(context -> this.authenticationsImpl.exchangeAcrRefreshTokenForAcrAccessTokenWithResponseAsync(serviceName, scope, acrRefreshToken, grantType, context)
.flatMap(response -> {
AcrAccessToken token = response.getValue();
if (token != null) {
String accessToken = token.getAccessToken();
// Expiration is decoded from the JWT itself.
OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(accessToken);
return Mono.just(new AccessToken(accessToken, expirationTime));
}
return Mono.empty();
}));
}
/**
 * Exchanges an AAD access token for an ACR refresh token.
 *
 * @param aadAccessToken The AAD access token to exchange.
 * @param serviceName The ACR service the token is scoped to.
 * @return The ACR refresh token (expiry decoded from the JWT payload), or empty when the
 *     service returns no body.
 */
public Mono<AccessToken> getAcrRefreshTokenAsync(String aadAccessToken, String serviceName) {
    return withContext(context -> this.authenticationsImpl
        .exchangeAadAccessTokenForAcrRefreshTokenWithResponseAsync(
            PostContentSchemaGrantType.ACCESS_TOKEN, serviceName, null, null, aadAccessToken, context)
        .flatMap(response -> {
            AcrRefreshToken body = response.getValue();
            if (body == null) {
                return Mono.empty();
            }
            String jwt = body.getRefreshToken();
            return Mono.just(new AccessToken(jwt, JsonWebToken.retrieveExpiration(jwt)));
        }));
}
} | |
`buffer.asReadOnlyBuffer()` only creates an instance of ByteBuffer wrapper which will still be [backed by the same underlying byte array](https://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html#asReadOnlyBuffer()). So, the size of the byte array should not have an impact on using `read-only` view and it's a lot safer than to modify the start position. | public static String byteArrayToHex(byte[] a) {
StringBuilder sb = new StringBuilder(a.length * 2);
for (byte b: a) {
sb.append(String.format("%02x", b));
}
return sb.toString();
} | for (byte b: a) { | public static String byteArrayToHex(byte[] a) {
StringBuilder sb = new StringBuilder(a.length * 2);
for (byte b: a) {
sb.append(String.format("%02x", b));
}
return sb.toString();
} | class UtilsImpl {
// Client name/version reported in the User-Agent; read from the packaged properties file.
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final int HTTP_STATUS_CODE_NOT_FOUND = 404;
private static final int HTTP_STATUS_CODE_ACCEPTED = 202;
// Accept/Content-Type media type for OCI image manifests.
public static final String OCI_MANIFEST_MEDIA_TYPE;
// Response header carrying the canonical digest of the returned content.
public static final String DOCKER_DIGEST_HEADER_NAME;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
}
/**
* This method builds the httpPipeline for the builders.
* @param clientOptions The client options
* @param logOptions http log options.
* @param configuration configuration settings.
* @param retryPolicy retry policy
* @param credential credentials.
* @param audience the Container Registry audience the credential token is scoped to.
* @param perCallPolicies per call policies.
* @param perRetryPolicies per retry policies.
* @param httpClient http client
* @param endpoint endpoint to be called
* @param serviceVersion the service api version being targeted by the client.
* @param logger logger used to report the anonymous-access fallback.
* @return returns the httpPipeline to be consumed by the builders.
*/
public static HttpPipeline buildHttpPipeline(
ClientOptions clientOptions,
HttpLogOptions logOptions,
Configuration configuration,
RetryPolicy retryPolicy,
TokenCredential credential,
ContainerRegistryAudience audience,
List<HttpPipelinePolicy> perCallPolicies,
List<HttpPipelinePolicy> perRetryPolicies,
HttpClient httpClient,
String endpoint,
ContainerRegistryServiceVersion serviceVersion,
ClientLogger logger) {
ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(
new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
policies.add(new CookiePolicy());
policies.add(new AddDatePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
// A null credential is allowed: the registry then serves anonymous-access requests.
if (credential == null) {
logger.verbose("Credentials are null, enabling anonymous access");
}
// The token service gets its own copy of the policy list WITHOUT the credentials policy,
// so the token-exchange call itself does not recursively attempt authentication.
ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
credentialPolicies.add(loggingPolicy);
ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
credential,
audience,
endpoint,
serviceVersion,
new HttpPipelineBuilder()
.policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build(),
JacksonAdapter.createDefaultSerializerAdapter());
ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
policies.add(credentialsPolicy);
policies.add(loggingPolicy);
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
/**
 * Returns a shallow copy of the given policy list; the input is not modified.
 *
 * @param policies policies to copy.
 * @return a new, independent list containing the same policy instances.
 */
private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
    // Copy constructor replaces the hand-rolled element-by-element loop.
    return new ArrayList<>(policies);
}
/**
 * This method computes the digest ("sha256:&lt;hex&gt;") for the buffer content.
 * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
 * The buffer's position is restored afterwards, so the caller can still consume the content.
 *
 * @param buffer The buffer containing the image bytes.
 * @return SHA-256 digest for the given buffer.
 */
public static String computeDigest(ByteBuffer buffer) {
    int startPosition = buffer.position();
    try {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(buffer);
        return "sha256:" + byteArrayToHex(md.digest());
    } catch (NoSuchAlgorithmException e) {
        // FIX: was an empty catch that fell through to `return null`. Every conformant Java
        // platform is required to provide SHA-256, so this path is unreachable — fail loudly
        // instead of silently handing callers a null digest.
        throw new IllegalStateException("SHA-256 MessageDigest is unavailable", e);
    } finally {
        // Restore the caller's read position regardless of outcome.
        buffer.position(startPosition);
    }
}
/**
* Delete operation should be idempotent.
* And so should result in a success in case the service response is 404 (Not Found).
* @param responseT The response object.
* @param <T> The encapsulating value.
* @return The transformed response object.
*/
public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
// Pass any non-404 status through unchanged.
return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
}
// Map 404 to 202 so repeated deletes look successful.
return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
}
// Re-wraps the response with the given status code and no body.
static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
return Mono.just(new ResponseBase<String, Void>(
responseT.getRequest(),
statusCode,
responseT.getHeaders(),
null,
null));
}
/**
 * This method converts the API response codes into well known exceptions.
 *
 * @param exception The exception returned by the rest client.
 * @return The exception surfaced by the public methods.
 */
public static Throwable mapException(Throwable exception) {
    AcrErrorsException acrException = null;
    if (exception instanceof AcrErrorsException) {
        acrException = (AcrErrorsException) exception;
    } else if (exception instanceof RuntimeException) {
        // Reactive operators may wrap the service exception; look one level down.
        Throwable cause = exception.getCause();
        if (cause instanceof AcrErrorsException) {
            acrException = (AcrErrorsException) cause;
        }
    }
    if (acrException == null) {
        return exception;
    }
    final HttpResponse errorHttpResponse = acrException.getResponse();
    final String errorDetail = acrException.getMessage();
    switch (errorHttpResponse.getStatusCode()) {
        case 401:
            return new ClientAuthenticationException(errorDetail, errorHttpResponse, exception);
        case 404:
            return new ResourceNotFoundException(errorDetail, errorHttpResponse, exception);
        case 409:
            return new ResourceExistsException(errorDetail, errorHttpResponse, exception);
        case 412:
            return new ResourceModifiedException(errorDetail, errorHttpResponse, exception);
        default:
            return new HttpResponseException(errorDetail, errorHttpResponse, exception);
    }
}
} | class UtilsImpl {
// Client name/version reported in the User-Agent; read from the packaged properties file.
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final int HTTP_STATUS_CODE_NOT_FOUND;
private static final int HTTP_STATUS_CODE_ACCEPTED;
// Pagination "Link" header name and the pattern extracting the URL between '<' and '>'.
private static final String CONTINUATION_LINK_HEADER_NAME;
private static final Pattern CONTINUATION_LINK_PATTERN;
// Accept/Content-Type media type for OCI image manifests.
public static final String OCI_MANIFEST_MEDIA_TYPE;
// Response header carrying the canonical digest of the returned content.
public static final String DOCKER_DIGEST_HEADER_NAME;
// Tracing-namespace attribute value for this service.
public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
HTTP_STATUS_CODE_NOT_FOUND = 404;
HTTP_STATUS_CODE_ACCEPTED = 202;
OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest";
CONTINUATION_LINK_HEADER_NAME = "Link";
CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");
CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
}
// Utility class: prevent instantiation.
private UtilsImpl() { }
/**
* This method builds the httpPipeline for the builders.
* @param clientOptions The client options
* @param logOptions http log options.
* @param configuration configuration settings.
* @param retryPolicy retry policy
* @param retryOptions retry options
* @param credential credentials.
* @param audience the Container Registry audience the credential token is scoped to; defaults
*                 to the public-cloud ARM audience when null.
* @param perCallPolicies per call policies.
* @param perRetryPolicies per retry policies.
* @param httpClient http client
* @param endpoint endpoint to be called
* @param serviceVersion the service api version being targeted by the client.
* @param logger logger used to report the anonymous-access fallback.
* @return returns the httpPipeline to be consumed by the builders.
*/
public static HttpPipeline buildHttpPipeline(
ClientOptions clientOptions,
HttpLogOptions logOptions,
Configuration configuration,
RetryPolicy retryPolicy,
RetryOptions retryOptions,
TokenCredential credential,
ContainerRegistryAudience audience,
List<HttpPipelinePolicy> perCallPolicies,
List<HttpPipelinePolicy> perRetryPolicies,
HttpClient httpClient,
String endpoint,
ContainerRegistryServiceVersion serviceVersion,
ClientLogger logger) {
ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(
new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
// retryPolicy and retryOptions are mutually exclusive; the helper validates and picks one.
policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
policies.add(new CookiePolicy());
policies.add(new AddDatePolicy());
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
// A null credential is allowed: the registry then serves anonymous-access requests.
if (credential == null) {
logger.verbose("Credentials are null, enabling anonymous access");
}
// The token service gets its own copy of the policy list WITHOUT the credentials policy,
// so the token-exchange call itself does not recursively attempt authentication.
ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
credentialPolicies.add(loggingPolicy);
if (audience == null) {
audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
}
ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
credential,
audience,
endpoint,
serviceVersion,
new HttpPipelineBuilder()
.policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build(),
JacksonAdapter.createDefaultSerializerAdapter());
ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
policies.add(credentialsPolicy);
policies.add(loggingPolicy);
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
/**
 * Returns a shallow copy of the given policy list.
 * <p>
 * Uses the {@link ArrayList} copy constructor instead of a manual element-by-element loop;
 * the policies themselves are shared, only the list is new.
 *
 * @param policies The list to copy; must not be null.
 * @return A new {@link ArrayList} containing the same policies in the same order.
 */
private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
    return new ArrayList<>(policies);
}
/**
 * This method computes the digest for the buffer content.
 * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
 * @param buffer The buffer containing the image bytes.
 * @return SHA-256 digest for the given buffer, in the {@code sha256:<hex>} format.
 * @throws IllegalStateException If the SHA-256 algorithm is unavailable (never on a
 * conformant JVM — every Java platform implementation is required to support SHA-256).
 */
public static String computeDigest(ByteBuffer buffer) {
    // Hash a read-only duplicate so the caller's buffer position/limit are left untouched.
    ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();
    try {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(readOnlyBuffer);
        return "sha256:" + byteArrayToHex(md.digest());
    } catch (NoSuchAlgorithmException e) {
        // Previously this was silently swallowed and null was returned. SHA-256 support is
        // mandated by the Java security spec, so this branch is effectively unreachable;
        // fail loudly rather than propagating a null digest downstream.
        throw new IllegalStateException("SHA-256 message digest is not available.", e);
    }
}
/**
 * Delete operation should be idempotent.
 * And so should result in a success in case the service response is 404 : Not Found
 * (the code compares against {@code HTTP_STATUS_CODE_NOT_FOUND}).
 * @param responseT The response object.
 * @param <T> The encapsulating value.
 * @return The transformed response object.
 */
public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) {
    if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
        // Anything other than 404 is passed through with its original status code.
        return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
    }
    // Map 404 to 202 Accepted so deletes are idempotent from the caller's perspective.
    return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
}
/**
 * Wraps the request/headers of the given response into a value-less {@link Response}
 * carrying the supplied status code.
 *
 * @param responseT The response whose request and headers are reused.
 * @param statusCode The status code to report on the returned response.
 * @param <T> The value type of the source response (the result carries no value).
 * @return A {@link Mono} emitting the synthesized {@code Response<Void>}.
 */
static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
    Response<Void> mapped = new ResponseBase<String, Void>(
        responseT.getRequest(),
        statusCode,
        responseT.getHeaders(),
        null,
        null);
    return Mono.just(mapped);
}
/**
 * This method converts the API response codes into well known exceptions.
 * Unwraps an {@link AcrErrorsException} (either directly or as the cause of a
 * {@link RuntimeException}) and maps its HTTP status code to the matching azure-core
 * exception type; anything else is returned unchanged.
 *
 * @param exception The exception returned by the rest client.
 * @return The exception returned by the public methods.
 */
public static Throwable mapException(Throwable exception) {
    AcrErrorsException acrException = null;

    if (exception instanceof AcrErrorsException) {
        acrException = (AcrErrorsException) exception;
    } else if (exception instanceof RuntimeException) {
        // The reactive layers often wrap service errors; look one level down.
        Throwable cause = ((RuntimeException) exception).getCause();
        if (cause instanceof AcrErrorsException) {
            acrException = (AcrErrorsException) cause;
        }
    }

    if (acrException == null) {
        // Not a service error we recognize; hand it back untouched.
        return exception;
    }

    final HttpResponse errorResponse = acrException.getResponse();
    final String errorDetail = acrException.getMessage();

    switch (errorResponse.getStatusCode()) {
        case 401:
            return new ClientAuthenticationException(errorDetail, errorResponse, exception);
        case 404:
            return new ResourceNotFoundException(errorDetail, errorResponse, exception);
        case 409:
            return new ResourceExistsException(errorDetail, errorResponse, exception);
        case 412:
            return new ResourceModifiedException(errorDetail, errorResponse, exception);
        default:
            return new HttpResponseException(errorDetail, errorResponse, exception);
    }
}
/**
 * This method parses the response to get the continuation token used to make the next pagination call.
 * The continuation token is returned by the service in the form of a header and not as a nextLink field.
 * Delegates to the mapping overload with an identity mapper.
 * @param listResponse response that is parsed.
 * @param <T> the model type that is being operated on.
 * @return paged response with the correct continuation token.
 */
public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
    return getPagedResponseWithContinuationToken(listResponse, Function.identity());
}
/**
 * This method parses the response to get the continuation token used to make the next pagination call.
 * The continuation token is returned by the service in the form of a header and not as a nextLink field.
 *
 * <p>
 * Per the Docker v2 HTTP API spec, the Link header is an RFC5988
 * compliant rel='next' with URL to next result set, if available:
 * Link = "Link" ":" link-value; link-value = "&lt;" URI-Reference "&gt;" * (";" link-param).
 * The URI reference between the angle brackets is extracted as the continuation token.
 * </p>
 * @param listResponse response that is parsed.
 * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
 * @param <T> The model type returned by the rest client.
 * @param <R> The model type returned by the public client.
 * @return paged response with the correct continuation token.
 */
public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) {
    Objects.requireNonNull(mapperFunction);

    String continuationLink = null;
    HttpHeaders headers = listResponse.getHeaders();
    if (headers != null) {
        String linkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
        if (!CoreUtils.isNullOrEmpty(linkHeader)) {
            Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(linkHeader);
            // A single capture group holds the URI-Reference between the angle brackets.
            if (matcher.matches() && matcher.groupCount() == 1) {
                continuationLink = matcher.group(1);
            }
        }
    }

    List<T> mappedValues = mapperFunction.apply(listResponse.getValue());
    return new PagedResponseBase<String, T>(
        listResponse.getRequest(),
        listResponse.getStatusCode(),
        headers,
        mappedValues,
        continuationLink,
        null);
}
} |
`audience` should be optional and if not set, we should use the default (public cloud) audience. #Resolved | public ContainerRegistryBlobAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' can't be null");
Objects.requireNonNull(audience, "'audience' can't be null");
ContainerRegistryServiceVersion serviceVersion = (version != null)
? version
: ContainerRegistryServiceVersion.getLatest();
HttpPipeline pipeline = getHttpPipeline();
ContainerRegistryBlobAsyncClient client = new ContainerRegistryBlobAsyncClient(repositoryName, pipeline, endpoint, serviceVersion.getVersion());
return client;
} | Objects.requireNonNull(audience, "'audience' can't be null"); | public ContainerRegistryBlobAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'endpoint' can't be null");
ContainerRegistryServiceVersion serviceVersion = (version != null)
? version
: ContainerRegistryServiceVersion.getLatest();
HttpPipeline pipeline = getHttpPipeline();
ContainerRegistryBlobAsyncClient client = new ContainerRegistryBlobAsyncClient(repositoryName, pipeline, endpoint, serviceVersion.getVersion());
return client;
} | class ContainerRegistryBlobClientBuilder implements
ConfigurationTrait<ContainerRegistryBlobClientBuilder>,
EndpointTrait<ContainerRegistryBlobClientBuilder>,
HttpTrait<ContainerRegistryBlobClientBuilder>,
TokenCredentialTrait<ContainerRegistryBlobClientBuilder> {
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
private String repositoryName;
/**
 * Sets the service endpoint for the Azure Container Registry instance.
 *
 * @param endpoint The URL of the Container Registry instance.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
 */
public ContainerRegistryBlobClientBuilder endpoint(String endpoint) {
    try {
        // Validation only; the parsed URL itself is not retained.
        new URL(endpoint);
    } catch (MalformedURLException ex) {
        // Keep the MalformedURLException as the cause so callers can see why parsing failed.
        throw logger.logExceptionAsWarning(
            new IllegalArgumentException("'endpoint' must be a valid URL", ex));
    }
    this.endpoint = endpoint;
    return this;
}
/**
 * Sets the repository name for the Azure Container Registry Blob instance.
 *
 * @param repositoryName The name of the repository (not the registry URL) that this client operates on.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
public ContainerRegistryBlobClientBuilder repository(String repositoryName) {
    this.repositoryName = repositoryName;
    return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "'audience' can't be null");
this.audience = audience;
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryBlobClient} or {@link ContainerRegistryBlobAsyncClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
* {@code applicationId} using {@link ClientOptions
* the {@link UserAgentPolicy} for telemetry/monitoring purposes.
*
* <p>More About <a href="https:
*
* @param clientOptions {@link ClientOptions}.
*
* @return the updated {@link ContainerRegistryBlobClientBuilder} object
*/
public ContainerRegistryBlobClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryBlobAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryBlobClientBuilder object.
*/
public ContainerRegistryBlobClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
*
* @return The updated ContainerRegistryClientBuilder object.
*/
@Override
public ContainerRegistryBlobClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
 * Adds a policy to the set of existing policies.
 * Policies positioned {@link HttpPipelinePosition#PER_CALL} run once per request;
 * all others run on every retry attempt.
 *
 * @param policy The policy for service requests.
 * @return The updated ContainerRegistryBlobClientBuilder object.
 * @throws NullPointerException If {@code policy} is null.
 */
public ContainerRegistryBlobClientBuilder addPolicy(HttpPipelinePolicy policy) {
    Objects.requireNonNull(policy, "'policy' cannot be null.");
    boolean isPerCall = policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL;
    (isPerCall ? perCallPolicies : perRetryPolicies).add(policy);
    return this;
}
/**
* Creates a {@link ContainerRegistryBlobAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryBlobAsyncClient} is created.
* <p>
* If {@link | class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} |
If this just returns URL to documentation, we should call this `getDocumentationLink()` or something similar. Users may expect the documentation itself to be returned when they see this method name and `String` return type. #Resolved | public String getDocumentation() {
return this.documentation;
} | } | public String getDocumentation() {
return this.documentation;
} | class OciAnnotations {
/*
* Date and time on which the image was built (string, date-time as defined
* by https:
*/
@JsonProperty(value = "org.opencontainers.image.created")
private OffsetDateTime created;
/*
* Contact details of the people or organization responsible for the image.
*/
@JsonProperty(value = "org.opencontainers.image.authors")
private String authors;
/*
* URL to find more information on the image.
*/
@JsonProperty(value = "org.opencontainers.image.url")
private String url;
/*
* URL to get documentation on the image.
*/
@JsonProperty(value = "org.opencontainers.image.documentation")
private String documentation;
/*
* URL to get source code for building the image.
*/
@JsonProperty(value = "org.opencontainers.image.source")
private String source;
/*
* Version of the packaged software. The version MAY match a label or tag
* in the source code repository, may also be Semantic
* versioning-compatible
*/
@JsonProperty(value = "org.opencontainers.image.version")
private String version;
/*
* Source control revision identifier for the packaged software.
*/
@JsonProperty(value = "org.opencontainers.image.revision")
private String revision;
/*
* Name of the distributing entity, organization or individual.
*/
@JsonProperty(value = "org.opencontainers.image.vendor")
private String vendor;
/*
* License(s) under which contained software is distributed as an SPDX
* License Expression.
*/
@JsonProperty(value = "org.opencontainers.image.licenses")
private String licenses;
/*
* Name of the reference for a target.
*/
@JsonProperty(value = "org.opencontainers.image.ref.name")
private String name;
/*
* Human-readable title of the image
*/
@JsonProperty(value = "org.opencontainers.image.title")
private String title;
/*
* Human-readable description of the software packaged in the image
*/
@JsonProperty(value = "org.opencontainers.image.description")
private String description;
/*
* Additional information provided through arbitrary metadata.
*/
@JsonIgnore private Map<String, Object> additionalProperties;
/**
* Get the created property: Date and time on which the image was built (string, date-time as defined by
* https:
*
* @return the created value.
*/
public OffsetDateTime getCreated() {
return this.created;
}
/**
* Set the created property: Date and time on which the image was built (string, date-time as defined by
* https:
*
* @param created the created value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setCreated(OffsetDateTime created) {
this.created = created;
return this;
}
/**
* Get the authors property: Contact details of the people or organization responsible for the image.
*
* @return the authors value.
*/
public String getAuthors() {
return this.authors;
}
/**
* Set the authors property: Contact details of the people or organization responsible for the image.
*
* @param authors the authors value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setAuthors(String authors) {
this.authors = authors;
return this;
}
/**
* Get the url property: URL to find more information on the image.
*
* @return the url value.
*/
public String getUrl() {
return this.url;
}
/**
* Set the url property: URL to find more information on the image.
*
* @param url the url value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setUrl(String url) {
this.url = url;
return this;
}
/**
* Get the documentation property: URL to get documentation on the image.
*
* @return the documentation value.
*/
/**
* Set the documentation property: URL to get documentation on the image.
*
* @param documentation the documentation value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setDocumentation(String documentation) {
this.documentation = documentation;
return this;
}
/**
* Get the source property: URL to get source code for building the image.
*
* @return the source value.
*/
public String getSource() {
return this.source;
}
/**
* Set the source property: URL to get source code for building the image.
*
* @param source the source value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setSource(String source) {
this.source = source;
return this;
}
/**
* Get the version property: Version of the packaged software. The version MAY match a label or tag in the source
* code repository, may also be Semantic versioning-compatible.
*
* @return the version value.
*/
public String getVersion() {
return this.version;
}
/**
* Set the version property: Version of the packaged software. The version MAY match a label or tag in the source
* code repository, may also be Semantic versioning-compatible.
*
* @param version the version value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setVersion(String version) {
this.version = version;
return this;
}
/**
* Get the revision property: Source control revision identifier for the packaged software.
*
* @return the revision value.
*/
public String getRevision() {
return this.revision;
}
/**
* Set the revision property: Source control revision identifier for the packaged software.
*
* @param revision the revision value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setRevision(String revision) {
this.revision = revision;
return this;
}
/**
* Get the vendor property: Name of the distributing entity, organization or individual.
*
* @return the vendor value.
*/
public String getVendor() {
return this.vendor;
}
/**
* Set the vendor property: Name of the distributing entity, organization or individual.
*
* @param vendor the vendor value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setVendor(String vendor) {
this.vendor = vendor;
return this;
}
/**
* Get the licenses property: License(s) under which contained software is distributed as an SPDX License
* Expression.
*
* @return the licenses value.
*/
public String getLicenses() {
return this.licenses;
}
/**
* Set the licenses property: License(s) under which contained software is distributed as an SPDX License
* Expression.
*
* @param licenses the licenses value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setLicenses(String licenses) {
this.licenses = licenses;
return this;
}
/**
* Get the name property: Name of the reference for a target.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: Name of the reference for a target.
*
* @param name the name value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setName(String name) {
this.name = name;
return this;
}
/**
* Get the title property: Human-readable title of the image.
*
* @return the title value.
*/
public String getTitle() {
return this.title;
}
/**
* Set the title property: Human-readable title of the image.
*
* @param title the title value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setTitle(String title) {
this.title = title;
return this;
}
/**
* Get the description property: Human-readable description of the software packaged in the image.
*
* @return the description value.
*/
public String getDescription() {
return this.description;
}
/**
* Set the description property: Human-readable description of the software packaged in the image.
*
* @param description the description value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setDescription(String description) {
this.description = description;
return this;
}
/**
* Get the additionalProperties property: Additional information provided through arbitrary metadata.
*
* @return the additionalProperties value.
*/
@JsonAnyGetter
public Map<String, Object> getAdditionalProperties() {
return this.additionalProperties;
}
/**
* Set the additionalProperties property: Additional information provided through arbitrary metadata.
*
* @param additionalProperties the additionalProperties value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setAdditionalProperties(Map<String, Object> additionalProperties) {
this.additionalProperties = additionalProperties;
return this;
}
@JsonAnySetter
void setAdditionalProperties(String key, Object value) {
if (additionalProperties == null) {
additionalProperties = new HashMap<>();
}
additionalProperties.put(key, value);
}
} | class OciAnnotations {
/*
* Date and time on which the image was built (string, date-time as defined
* by https:
*/
@JsonProperty(value = "org.opencontainers.image.created")
private OffsetDateTime created;
/*
* Contact details of the people or organization responsible for the image.
*/
@JsonProperty(value = "org.opencontainers.image.authors")
private String authors;
/*
* URL to find more information on the image.
*/
@JsonProperty(value = "org.opencontainers.image.url")
private String url;
/*
* URL to get documentation on the image.
*/
@JsonProperty(value = "org.opencontainers.image.documentation")
private String documentation;
/*
* URL to get source code for building the image.
*/
@JsonProperty(value = "org.opencontainers.image.source")
private String source;
/*
* Version of the packaged software. The version MAY match a label or tag
* in the source code repository, may also be Semantic
* versioning-compatible
*/
@JsonProperty(value = "org.opencontainers.image.version")
private String version;
/*
* Source control revision identifier for the packaged software.
*/
@JsonProperty(value = "org.opencontainers.image.revision")
private String revision;
/*
* Name of the distributing entity, organization or individual.
*/
@JsonProperty(value = "org.opencontainers.image.vendor")
private String vendor;
/*
* License(s) under which contained software is distributed as an SPDX
* License Expression.
*/
@JsonProperty(value = "org.opencontainers.image.licenses")
private String licenses;
/*
* Name of the reference for a target.
*/
@JsonProperty(value = "org.opencontainers.image.ref.name")
private String name;
/*
* Human-readable title of the image
*/
@JsonProperty(value = "org.opencontainers.image.title")
private String title;
/*
* Human-readable description of the software packaged in the image
*/
@JsonProperty(value = "org.opencontainers.image.description")
private String description;
/*
* Additional information provided through arbitrary metadata.
*/
@JsonIgnore private Map<String, Object> additionalProperties;
/**
* Get the created property: Date and time on which the image was built (string, date-time as defined by
* https:
*
* @return the created value.
*/
public OffsetDateTime getCreated() {
return this.created;
}
/**
* Set the created property: Date and time on which the image was built (string, date-time as defined by
* https:
*
* @param created the created value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setCreated(OffsetDateTime created) {
this.created = created;
return this;
}
/**
* Get the authors property: Contact details of the people or organization responsible for the image.
*
* @return the authors value.
*/
public String getAuthors() {
return this.authors;
}
/**
* Set the authors property: Contact details of the people or organization responsible for the image.
*
* @param authors the authors value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setAuthors(String authors) {
this.authors = authors;
return this;
}
/**
* Get the url property: URL to find more information on the image.
*
* @return the url value.
*/
public String getUrl() {
return this.url;
}
/**
* Set the url property: URL to find more information on the image.
*
* @param url the url value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setUrl(String url) {
this.url = url;
return this;
}
/**
* Get the documentation property: URL to get documentation on the image.
*
* @return the documentation value.
*/
/**
* Set the documentation property: URL to get documentation on the image.
*
* @param documentation the documentation value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setDocumentation(String documentation) {
this.documentation = documentation;
return this;
}
/**
* Get the source property: URL to get source code for building the image.
*
* @return the source value.
*/
public String getSource() {
return this.source;
}
/**
* Set the source property: URL to get source code for building the image.
*
* @param source the source value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setSource(String source) {
this.source = source;
return this;
}
/**
* Get the version property: Version of the packaged software. The version MAY match a label or tag in the source
* code repository, may also be Semantic versioning-compatible.
*
* @return the version value.
*/
public String getVersion() {
return this.version;
}
/**
* Set the version property: Version of the packaged software. The version MAY match a label or tag in the source
* code repository, may also be Semantic versioning-compatible.
*
* @param version the version value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setVersion(String version) {
this.version = version;
return this;
}
/**
* Get the revision property: Source control revision identifier for the packaged software.
*
* @return the revision value.
*/
public String getRevision() {
return this.revision;
}
/**
* Set the revision property: Source control revision identifier for the packaged software.
*
* @param revision the revision value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setRevision(String revision) {
this.revision = revision;
return this;
}
/**
* Get the vendor property: Name of the distributing entity, organization or individual.
*
* @return the vendor value.
*/
public String getVendor() {
return this.vendor;
}
/**
* Set the vendor property: Name of the distributing entity, organization or individual.
*
* @param vendor the vendor value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setVendor(String vendor) {
this.vendor = vendor;
return this;
}
/**
* Get the licenses property: License(s) under which contained software is distributed as an SPDX License
* Expression.
*
* @return the licenses value.
*/
public String getLicenses() {
return this.licenses;
}
/**
* Set the licenses property: License(s) under which contained software is distributed as an SPDX License
* Expression.
*
* @param licenses the licenses value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setLicenses(String licenses) {
this.licenses = licenses;
return this;
}
/**
* Get the name property: Name of the reference for a target.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: Name of the reference for a target.
*
* @param name the name value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setName(String name) {
this.name = name;
return this;
}
/**
* Get the title property: Human-readable title of the image.
*
* @return the title value.
*/
public String getTitle() {
return this.title;
}
/**
 * Sets the human-readable title of the image.
 *
 * @param title the title value to set.
 * @return this {@link OciAnnotations} instance, enabling call chaining.
 */
public OciAnnotations setTitle(String title) {
    this.title = title;
    return this;
}
/**
 * Returns the human-readable description of the software packaged in the image.
 *
 * @return the description value.
 */
public String getDescription() {
    return description;
}
/**
 * Sets the human-readable description of the software packaged in the image.
 *
 * @param description the description value to set.
 * @return this {@link OciAnnotations} instance, enabling call chaining.
 */
public OciAnnotations setDescription(String description) {
    this.description = description;
    return this;
}
/**
 * Returns the arbitrary extra metadata carried alongside the well-known annotations.
 * Exposed to the serializer via {@code @JsonAnyGetter} so unknown keys round-trip.
 *
 * @return the additionalProperties value.
 */
@JsonAnyGetter
public Map<String, Object> getAdditionalProperties() {
    return additionalProperties;
}
/**
 * Replaces the arbitrary extra metadata carried alongside the well-known annotations.
 *
 * @param additionalProperties the additionalProperties value to set.
 * @return this {@link OciAnnotations} instance, enabling call chaining.
 */
public OciAnnotations setAdditionalProperties(Map<String, Object> additionalProperties) {
    this.additionalProperties = additionalProperties;
    return this;
}
// Jackson catch-all: routes any unrecognized JSON property into the
// additionalProperties map, creating the map lazily on first use.
@JsonAnySetter
void setAdditionalProperties(String key, Object value) {
    Map<String, Object> props = additionalProperties;
    if (props == null) {
        props = new HashMap<>();
        additionalProperties = props;
    }
    props.put(key, value);
}
} |
#### }).onErrorMap(UtilsImpl::mapException); --- Why does this method take `Flux<ByteBuffer>` as input? The public APIs either take `BinaryData` or `OciManifest` which is then converted to `Flux<ByteBuffer>` which may not be required if we use `ByteBuffer` as the param for this method. Then BinaryData can be converted using `BinaryData.toByteBuffer()` and OciManifest can be converted using. ```java ByteBuffer byteBuffer = ByteBuffer.wrap(this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON)); ``` Suggesting this because the doc says that the content will be fully loaded into memory, so, BinaryData can be mapped to ByteBuffer instead of `Flux`. #Resolved | Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
dig,
playableFlux.flush(),
playableFlux.getSize(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context))
.flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
} | new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), | return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Returns the registry repository this client operates on.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}

/**
 * Returns the complete registry endpoint, including the authority.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Upload the Oci manifest to the repository.
 * The upload is done as a single operation.
 *
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    try {
        // Serialize eagerly so a serialization failure surfaces as a typed error below.
        byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
        return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono);
    } catch (IOException exception) {
        // Preserve the original cause instead of flattening it to just its message,
        // so callers and logs retain the full serialization stack trace.
        return monoError(logger, new RuntimeException(exception.getMessage(), exception));
    }
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
/**
 * Uploads a manifest to the repository and returns the full REST response.
 * The data is read into memory and uploaded as a single operation.
 *
 * @param data The manifest that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context));
}
// Buffers the manifest payload, computes its digest, then issues createManifest.
// NOTE(review): BufferedFlux.write retains only the most recently emitted buffer
// (byteBuffer = buffer) while size counts every byte, so a source Flux emitting
// more than one buffer would upload truncated content with a wrong length.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
// Drain the source into the buffer, then compute the digest over what was written.
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
dig,
playableFlux.flush(),
playableFlux.getSize(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context))
.flatMap(response -> {
// Re-wrap the generated response so the public type exposes UploadManifestResult.
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Uploads a blob to the repository as a single unit.
 * <p>
 * The blob is read into memory and uploaded in one operation; splitting the layer
 * into multiple chunks is not currently supported.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadBlobWithResponse(data.toFluxByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository and returns the full REST response.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    if (data == null) {
        // Fail through the reactive pipeline like every sibling overload, instead of
        // letting data.toFluxByteBuffer() throw a synchronous NPE.
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context));
}
// Buffers the payload, computes its digest, then performs the three-step upload:
// startUpload -> uploadChunk -> completeUpload.
// NOTE(review): BufferedFlux.write retains only the most recently emitted buffer
// while size counts every byte — multi-buffer sources would upload truncated data.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
// Digest is captured here because completeUpload needs it two async hops later.
AtomicReference<String> digest = new AtomicReference<>();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> {
digest.set(dig);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context);
})
// Each step reuses the Location header of the previous response, with its leading '/' stripped (see trimNextLink).
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the generated response so the public type exposes UploadBlobResult.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Strips the leading '/' from a Location header value so it can be passed back
 * to the generated client as a relative next-link.
 */
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/") ? locationHeader.substring(1) : locationHeader;
}
/**
 * Download the manifest associated with the given tag or digest.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    // Delegate to the WithResponse variant and unwrap the payload.
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
 * Download the manifest associated with the given tag or digest, with the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    return withContext(context -> downloadManifestWithResponse(tagOrDigest, context));
}
// Fetches the manifest wrapper and converts it to the public OciManifest type,
// but only after verifying the response actually matches what was requested:
// either the returned digest header or the wrapper's tag must equal tagOrDigest.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
// Accept the response only if it answers the digest or the tag we asked for.
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Download the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Delegate to the WithResponse variant and unwrap the payload.
    return downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Download the blob\layer associated with the given digest, with the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
// Streams the blob and wraps it in the public result type.
// NOTE(review): unlike the manifest path, the digest returned in the response header
// is propagated without being compared to the requested digest — confirm intended.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
Response<DownloadBlobResult> blobResult = new ResponseBase<>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
null,
new DownloadBlobResult()
.setContent(streamResponse.getValue())
.setDigest(resDigest));
return Mono.just(blobResult);
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the image layer associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the image layer associated with the given digest, with the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(context -> deleteBlobWithResponse(digest, context));
}
// Context-aware core of deleteBlobWithResponse; maps service errors to public types.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the manifest associated with the given digest.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the manifest associated with the given digest, with the full REST response.
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
// Context-aware core of deleteManifestWithResponse.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Validate eagerly, for parity with deleteBlobWithResponse(String, Context);
    // previously a null digest reached the generated client unchecked.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Accumulates every ByteBuffer emitted by an upstream Flux so the payload can be
 * digested and then replayed for the actual upload call. Not thread-safe; intended
 * for use within a single reactive pipeline.
 */
static final class BufferedFlux {
    // Total number of payload bytes written so far.
    int size;
    // Write-mode buffer holding every byte received; grown on demand.
    ByteBuffer byteBuffer;

    int getSize() {
        return this.size;
    }

    /** Computes the digest over ALL buffered bytes, without disturbing the replay view. */
    String getDigest() {
        return UtilsImpl.computeDigest(readView());
    }

    /**
     * Appends the buffer's remaining bytes.
     * Fix: the previous implementation kept only the last emitted buffer
     * (byteBuffer = buffer) while size still counted every byte, so a source Flux
     * emitting more than once uploaded truncated data with a wrong length.
     */
    Flux<Void> write(ByteBuffer buffer) {
        int incoming = buffer.remaining();
        if (byteBuffer == null) {
            byteBuffer = ByteBuffer.allocate(Math.max(incoming, 16));
        } else if (byteBuffer.remaining() < incoming) {
            ByteBuffer grown = ByteBuffer.allocate(Math.max(size + incoming, byteBuffer.capacity() * 2));
            ByteBuffer written = byteBuffer.duplicate();
            written.flip();
            grown.put(written);
            byteBuffer = grown;
        }
        // Copy from a duplicate so the caller's buffer position is left untouched.
        byteBuffer.put(buffer.duplicate());
        size += incoming;
        return Flux.empty();
    }

    /** Replays the full buffered payload for the upload call. */
    Flux<ByteBuffer> flush() {
        return Flux.just(readView());
    }

    // Read-mode snapshot of everything written so far; each caller gets an
    // independent position so digesting never drains the upload view.
    private ByteBuffer readView() {
        ByteBuffer view = byteBuffer.duplicate();
        view.flip();
        return view;
    }
}
}
class ContainerRegistryBlobAsyncClient {
// Generated top-level client; also supplies the serializer adapter.
private final AzureContainerRegistryImpl registryImplClient;
// Generated sub-client for blob (layer) operations.
private final ContainerRegistryBlobsImpl blobsImpl;
// Generated sub-client for manifest operations.
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
// Package-private: instances are created by the corresponding builder.
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Returns the registry repository this client operates on.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}

/**
 * Returns the complete registry endpoint, including the authority.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Upload the Oci manifest to the repository.
 * The upload is done as a single operation.
 *
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    // BinaryData.fromObject handles serialization, so no checked exception here.
    return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
// Computes the digest over the in-memory payload, then issues createManifest.
// NOTE(review): assumes UtilsImpl.computeDigest does not advance data's position —
// otherwise Flux.just(data) would replay a drained buffer. TODO confirm.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
// Re-wrap the generated response so the public type exposes UploadManifestResult.
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // BinaryData content is fully in memory, so a single ByteBuffer suffices.
    return withContext(context -> uploadBlobWithResponse(data.toByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository and returns the full REST response.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    if (data == null) {
        // Fail through the reactive pipeline like every sibling overload, instead of
        // letting data.toByteBuffer() throw a synchronous NPE.
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Three-step upload: startUpload -> uploadChunk -> completeUpload, chaining each
// step's Location header (leading '/' stripped via trimNextLink).
// NOTE(review): assumes UtilsImpl.computeDigest does not advance data's position —
// otherwise Flux.just(data) would replay a drained buffer. TODO confirm.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the generated response so the public type exposes UploadBlobResult.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Strips the leading '/' from a Location header value so it can be passed back
 * to the generated client as a relative next-link.
 */
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/") ? locationHeader.substring(1) : locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    // Delegate to the WithResponse variant and unwrap the payload.
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    return withContext(context -> downloadManifestWithResponse(tagOrDigest, context));
}
// Fetches the manifest wrapper and converts it to the public OciManifest type,
// but only after verifying the response actually matches what was requested:
// either the returned digest header or the wrapper's tag must equal tagOrDigest.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
// Accept the response only if it answers the digest or the tag we asked for.
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Delegate to the WithResponse variant and unwrap the payload.
    return downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
// Collects the streamed blob into BinaryData and wraps it in the public result type.
// NOTE(review): the digest returned in the response header is propagated without
// being compared to the requested digest — confirm intended.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
// Materialize the stream; the whole blob ends up in memory.
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(context -> deleteBlobWithResponse(digest, context));
}
// Context-aware core of deleteBlobWithResponse; maps service errors to public types.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the manifest identified by the given digest.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return A {@link Mono} that completes when the manifest has been deleted.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    // Delegate to the WithResponse variant and discard the REST envelope.
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Deletes the manifest identified by the given digest and returns the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return A {@link Mono} emitting the {@link Response} of the delete operation.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    // Capture the reactor Context, then hand off to the context-aware overload.
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
// Context-aware implementation backing both public deleteManifest overloads.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Fail fast with the NullPointerException documented on the public overloads instead of
    // sending a malformed request; mirrors deleteBlobWithResponse(String, Context) above.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
`audience` should be optional and `null` should be allowed. #Resolved | public ContainerRegistryBlobClientBuilder audience(ContainerRegistryAudience audience) {
Objects.requireNonNull(audience, "'audience' can't be null");
this.audience = audience;
return this;
} | Objects.requireNonNull(audience, "'audience' can't be null"); | public ContainerRegistryBlobClientBuilder audience(ContainerRegistryAudience audience) {
this.audience = audience;
return this;
} | class ContainerRegistryBlobClientBuilder implements
ConfigurationTrait<ContainerRegistryBlobClientBuilder>,
EndpointTrait<ContainerRegistryBlobClientBuilder>,
HttpTrait<ContainerRegistryBlobClientBuilder>,
TokenCredentialTrait<ContainerRegistryBlobClientBuilder> {
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
private String repositoryName;
/**
 * Sets the service endpoint for the Azure Container Registry instance.
 *
 * @param endpoint The URL of the Container Registry instance.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
 */
@Override // endpoint(String) is declared by EndpointTrait; annotate for compile-time checking.
public ContainerRegistryBlobClientBuilder endpoint(String endpoint) {
    try {
        // Parse only to validate; a null endpoint also surfaces here as MalformedURLException,
        // which the URL(String) constructor wraps around any internal failure.
        new URL(endpoint);
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
    }
    this.endpoint = endpoint;
    return this;
}
/**
 * Sets the repository name for the Azure Container Registry Blob instance.
 *
 * @param repositoryName The name of the repository the built clients will operate on.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
public ContainerRegistryBlobClientBuilder repository(String repositoryName) {
    // Stored as-is; validation is deferred to client construction.
    this.repositoryName = repositoryName;
    return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @throws NullPointerException If {@code audience} is null.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the
 * Azure SDK for Java identity documentation for details on proper usage of the
 * {@link TokenCredential} type.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
@Override
public ContainerRegistryBlobClientBuilder credential(TokenCredential credential) {
    // Stored as-is; converted into registry-specific credentials when the pipeline is built.
    this.credential = credential;
    return this;
}
/**
 * Sets the {@link HttpPipeline} to use for the service client.
 * <p>
 * <strong>Note:</strong> a supplied pipeline takes precedence over every other HTTP-related
 * setting on this builder; those settings are ignored when the clients are built.
 * <p>
 * This service depends on an internal policy that converts Azure token credentials into Azure
 * Container Registry specific service credentials; a custom pipeline must provide its own
 * credential policy.
 *
 * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
@Override
public ContainerRegistryBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Warn when a previously configured pipeline is being cleared, which is likely accidental.
    boolean clearingConfigured = this.httpPipeline != null && httpPipeline == null;
    if (clearingConfigured) {
        logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
 * Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
 * <p>
 * When no version is set, the latest known service version is used, so upgrading the client
 * library may move requests to a newer service version.
 *
 * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
public ContainerRegistryBlobClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
    this.version = version;
    return this;
}
/**
 * Sets the HTTP client to use for sending and receiving requests to and from the service.
 *
 * @param httpClient The HTTP client to use for requests.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
@Override // httpClient(HttpClient) is declared by HttpTrait; annotate for compile-time checking.
public ContainerRegistryBlobClientBuilder httpClient(HttpClient httpClient) {
    // Warn when a previously configured client is being cleared, which is likely accidental.
    if (this.httpClient != null && httpClient == null) {
        logger.info("HttpClient is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}
/**
 * Sets the {@link ClientOptions} which enables various options to be set on the client. For
 * example, setting an {@code applicationId} customizes the {@link UserAgentPolicy} for
 * telemetry/monitoring purposes.
 *
 * @param clientOptions {@link ClientOptions}.
 * @return the updated {@link ContainerRegistryBlobClientBuilder} object
 */
@Override // clientOptions(ClientOptions) is declared by HttpTrait; annotate for compile-time checking.
public ContainerRegistryBlobClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = clientOptions;
    return this;
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * @param configuration The configuration store to be used.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
@Override // configuration(Configuration) is declared by ConfigurationTrait; annotate for compile-time checking.
public ContainerRegistryBlobClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the logging configuration for HTTP requests and responses.
 *
 * <p> If logLevel is not provided, HTTP request or response logging will not happen.</p>
 *
 * @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses.
 * @return The updated {@link ContainerRegistryBlobClientBuilder} object.
 */
@Override // httpLogOptions(HttpLogOptions) is declared by HttpTrait; annotate for compile-time checking.
public ContainerRegistryBlobClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
    this.httpLogOptions = httpLogOptions;
    return this;
}
/**
 * Sets the {@link HttpPipelinePolicy} that is used to retry requests.
 * <p>
 * When not provided, a default retry policy is used while building the clients.
 *
 * @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
 * {@link RetryPolicy} can be used to retry requests.
 * @return The updated ContainerRegistryBlobClientBuilder object.
 */
public ContainerRegistryBlobClientBuilder retryPolicy(RetryPolicy retryPolicy) {
    this.retryPolicy = retryPolicy;
    return this;
}
/**
 * Sets the {@link RetryOptions} for all the requests made through the client.
 * <p>
 * <strong>Note:</strong> ignored when a full {@link HttpPipeline} is supplied, and mutually
 * exclusive with setting a retry policy on this builder.
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return The updated ContainerRegistryBlobClientBuilder object.
 */
@Override
public ContainerRegistryBlobClientBuilder retryOptions(RetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Adds a policy to the set of existing policies.
 *
 * @param policy The policy for service requests.
 * @return The updated ContainerRegistryBlobClientBuilder object.
 * @throws NullPointerException If {@code policy} is null.
 */
@Override // addPolicy(HttpPipelinePolicy) is declared by HttpTrait; annotate for compile-time checking.
public ContainerRegistryBlobClientBuilder addPolicy(HttpPipelinePolicy policy) {
    Objects.requireNonNull(policy, "'policy' cannot be null.");
    // PER_CALL policies run once per request; all others re-run on every retry attempt.
    if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
        perCallPolicies.add(policy);
    } else {
        perRetryPolicies.add(policy);
    }
    return this;
}
/**
* Creates a {@link ContainerRegistryBlobAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryBlobAsyncClient} is created.
* <p>
* If {@link | class ContainerRegistryBlobClientBuilder implements
ConfigurationTrait<ContainerRegistryBlobClientBuilder>,
EndpointTrait<ContainerRegistryBlobClientBuilder>,
HttpTrait<ContainerRegistryBlobClientBuilder>,
TokenCredentialTrait<ContainerRegistryBlobClientBuilder> {
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobClientBuilder.class);
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private Configuration configuration;
private String endpoint;
private HttpClient httpClient;
private TokenCredential credential;
private HttpPipeline httpPipeline;
private HttpLogOptions httpLogOptions;
private RetryPolicy retryPolicy;
private RetryOptions retryOptions;
private ContainerRegistryServiceVersion version;
private ContainerRegistryAudience audience;
private String repositoryName;
/**
* Sets the service endpoint for the Azure Container Registry instance.
*
* @param endpoint The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
*/
@Override
public ContainerRegistryBlobClientBuilder endpoint(String endpoint) {
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
this.endpoint = endpoint;
return this;
}
/**
* Sets the repository name for the Azure Container Registry Blob instance.
*
* @param repositoryName The URL of the Container Registry instance.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder repository(String repositoryName) {
this.repositoryName = repositoryName;
return this;
}
/**
* Sets the audience for the Azure Container Registry service.
*
* @param audience ARM management scope associated with the given registry.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder credential(TokenCredential credential) {
this.credential = credential;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* If {@code pipeline} is set, all settings other than {@link
* to build {@link ContainerRegistryBlobClient} or {@link ContainerRegistryBlobAsyncClient}.<br>
* </p>
*
* This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials.
* In case you use your own pipeline you will have to create your own credential policy.<br>
*
* {For more information please see <a href="https:
*
* @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service version and so
* newer version of the client library may result in moving to a newer service version.
*
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
public ContainerRegistryBlobClientBuilder serviceVersion(ContainerRegistryServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpClient The {@link HttpClient} to use for requests.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
* recommended that this method be called with an instance of the {@link HttpClientOptions}
* class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
* configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
* interface.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param clientOptions A configured instance of {@link HttpClientOptions}.
*
* @return the updated {@link ContainerRegistryBlobClientBuilder} object
* @see HttpClientOptions
*/
@Override
public ContainerRegistryBlobClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* <p>The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store to be used.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
* the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
* to and from the service.
* @return The updated {@link ContainerRegistryBlobClientBuilder} object.
*/
@Override
public ContainerRegistryBlobClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Sets the {@link HttpPipelinePolicy} that is used to retry requests.
* <p>
* The default retry policy will be used if not provided {@link
* build {@link ContainerRegistryBlobAsyncClient}.
*
* @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example,
* {@link RetryPolicy} can be used to retry requests.
*
* @return The updated ContainerRegistryBlobClientBuilder object.
*/
public ContainerRegistryBlobClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link RetryOptions} for all the requests made through the client.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
* <p>
* Setting this is mutually exclusive with using {@link
*
* @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
*
* @return The updated ContainerRegistryBlobClientBuilder object.
*/
@Override
public ContainerRegistryBlobClientBuilder retryOptions(RetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
*
* <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
* particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
* they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
* based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
* trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
* documentation of types that implement this trait to understand the full set of implications.</p>
*
* @param policy A {@link HttpPipelinePolicy pipeline policy}.
* @return The updated ContainerRegistryBlobClientBuilder object.
* @throws NullPointerException If {@code policy} is null.
*/
@Override
public ContainerRegistryBlobClientBuilder addPolicy(HttpPipelinePolicy policy) {
Objects.requireNonNull(policy, "'policy' cannot be null.");
if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
perCallPolicies.add(policy);
} else {
perRetryPolicies.add(policy);
}
return this;
}
/**
* Creates a {@link ContainerRegistryBlobAsyncClient} based on options set in the Builder. Every time {@code
* buildAsyncClient()} is called a new instance of {@link ContainerRegistryBlobAsyncClient} is created.
* <p>
* If {@link |
Will the blobs ever be greater than 2GB? If so, this method will fail as ByteBuffer has a 2GB limit #Resolved | public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
} | return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); | public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
// Package-private constructor: wires the autorest-generated client and its sub-clients.
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
    this.repositoryName = repositoryName;
    this.endpoint = endpoint;
    // Build the generated implementation client once and reuse its sub-clients below.
    this.registryImplClient = new AzureContainerRegistryImplBuilder()
        .pipeline(httpPipeline)
        .url(endpoint)
        .apiVersion(version)
        .buildClient();
    this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
    this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Returns the registry's repository on which operations are being performed.
 *
 * @return The name of the repository.
 */
public String getRepositoryName() {
    return repositoryName;
}
/**
 * Returns the complete registry endpoint.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Uploads the given OCI manifest to the repository as a single operation.
 *
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    final byte[] payload;
    try {
        // Serialize eagerly so JSON serialization failures surface here as UncheckedIOException.
        payload = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
    } catch (IOException exception) {
        return monoError(logger, new UncheckedIOException(exception));
    }
    return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(payload), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a manifest to the repository as a single operation. The client currently only supports
 * OCI manifests, and this method assumes {@code data} is a valid OCI manifest. The payload is
 * read fully into memory before uploading.
 *
 * @param data The manifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Delegate to the context-aware overload and discard the REST envelope.
    return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a manifest to the repository as a single operation and returns the full REST response.
 * The client currently only supports OCI manifests, and this method assumes {@code data} is a
 * valid OCI manifest. The payload is read fully into memory before uploading.
 *
 * @param data The manifest that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Delegate to the context-aware overload, keeping the REST envelope.
    return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
// Context-aware manifest upload shared by the public uploadManifest overloads.
// Computes the content digest locally and sends the manifest in a single createManifest call.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest is derived from the payload bytes; the service echoes its own digest in the response headers.
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).flatMap(response -> {
// Re-wrap the generated-client response, exposing only the service-reported content digest.
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
// Translate service errors into the public exception types.
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Validate up front so a null payload yields the documented NullPointerException via
    // monoError (mirroring uploadManifestWithResponse) instead of an unlogged NPE thrown
    // from data.toByteBuffer().
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Context-aware blob upload shared by the public uploadBlob overloads.
// Three-step flow: start an upload session, send the whole payload as one chunk, then complete.
// Each step's Location header supplies the URL for the next step.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest of the full payload, supplied when completing the upload.
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
// Single-chunk upload: the entire buffer is sent in one uploadChunk call.
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
// Complete with no trailing payload (null body, 0 length); the digest seals the blob.
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the generated-client response, exposing only the service-reported content digest.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
// Translate service errors into the public exception types.
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Strips a single leading '/' from a Location header value so it can be used as a relative
 * next-link URL by the generated client.
 *
 * @param locationHeader Raw Location header value returned by the upload APIs.
 * @return The header value without a leading slash.
 */
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/") ? locationHeader.substring(1) : locationHeader;
}
/**
 * Downloads the manifest associated with the given tag or digest.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    // Delegate to the WithResponse variant and discard the REST envelope.
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
/**
 * Fetches the manifest for {@code tagOrDigest} and only exposes it when the
 * response's digest header or the manifest's tag matches what was requested.
 */
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
    if (tagOrDigest == null) {
        return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
    }
    return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .flatMap(response -> {
            String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            ManifestWrapper wrapper = response.getValue();
            // Reuse the captured wrapper instead of calling response.getValue() again.
            if (Objects.equals(digest, tagOrDigest) || Objects.equals(wrapper.getTag(), tagOrDigest)) {
                OciManifest ociManifest = new OciManifest()
                    .setAnnotations(wrapper.getAnnotations())
                    .setConfig(wrapper.getConfig())
                    .setLayers(wrapper.getLayers())
                    .setSchemaVersion(wrapper.getSchemaVersion());
                Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
                    response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    ociManifest,
                    null);
                return Mono.just(res);
            } else {
                return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
            }
        }).onErrorMap(UtilsImpl::mapException);
}
/**
 * Download the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    return downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}

/**
 * Download the blob\layer associated with the given digest, together with the
 * full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
/**
 * Downloads the blob for {@code digest} and aggregates it into one BinaryData.
 *
 * @param digest The digest identifying the blob.
 * @param context The context to associate with the service calls.
 * @return The REST response carrying the content and the digest header the service returned.
 */
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
// Digest reported by the service for the returned content.
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
// NOTE(review): the entire blob is buffered in memory here; large layers may be costly.
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}

/**
 * Delete the blob associated with the given digest, returning the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(context -> deleteBlobWithResponse(digest, context));
}
/**
 * Performs the blob deletion after validating the digest.
 */
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the manifest associated with the given digest.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}

/**
 * Delete the manifest associated with the given digest, returning the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
/**
 * Performs the manifest deletion after validating the digest.
 */
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // The public overload's javadoc promises a NullPointerException for a null
    // digest; validate here (as deleteBlobWithResponse does) instead of letting
    // the service call fail with a less useful error.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
// Generated low-level client through which all service calls are made.
private final AzureContainerRegistryImpl registryImplClient;
// Blob-level operations (upload/download/delete of layers).
private final ContainerRegistryBlobsImpl blobsImpl;
// Registry-level operations (manifest create/get/delete).
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
/**
 * Creates a client scoped to a single repository of the registry.
 *
 * @param repositoryName The repository all operations of this client act on.
 * @param httpPipeline The HTTP pipeline used for every request.
 * @param endpoint The registry endpoint, including the authority.
 * @param version The service API version to send.
 */
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Returns the registry repository on which this client's operations are performed.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}

/**
 * Returns the complete registry endpoint.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Upload the OCI manifest to the repository as a single operation.
 *
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    return manifest == null
        ? monoError(logger, new NullPointerException("'manifest' can't be null."))
        : uploadManifest(BinaryData.fromObject(manifest));
}
/**
 * Uploads a manifest to the repository. Only OciManifests are currently supported,
 * and the payload is assumed to be a valid OCI manifest.
 * <p>
 * The data is read into memory and uploaded as a single operation.
 *
 * @param data The manifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadManifestWithResponse(data.toByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a manifest to the repository, returning the full REST response. Only
 * OciManifests are currently supported, and the payload is assumed to be a valid
 * OCI manifest.
 * <p>
 * The data is read into memory and uploaded as a single operation.
 *
 * @param data The manifest that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadManifestWithResponse(data.toByteBuffer(), context));
}
/**
 * Creates the manifest in the repository under its client-computed content digest.
 *
 * @param data The raw manifest bytes; assumed to be a valid OCI manifest.
 * @param context The context to associate with the service call.
 * @return The REST response carrying the digest the service stored the manifest under.
 */
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// The manifest is content-addressed: it is stored under the digest of its bytes.
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    // Restored: this javadoc'd, annotated method was missing, which also left the
    // annotation above dangling as a duplicate on uploadBlobWithResponse.
    return uploadBlobWithResponse(data).flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Validate before data.toByteBuffer() to avoid a raw, unlogged NPE.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
/**
 * Uploads the buffer's remaining bytes as one blob: start upload, upload a single
 * chunk, then complete the upload with the client-computed digest.
 *
 * @param data The blob content to upload.
 * @param context The context to associate with the service calls.
 * @return The REST response carrying the digest the service confirmed for the blob.
 */
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Computed client-side and handed to completeUpload so the service can check it.
String digest = UtilsImpl.computeDigest(data);
// Each step's Location header (leading '/' trimmed) is the URL for the next step.
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Surface the service-reported content digest to the caller.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Strips one leading '/' from a Location header value, if present, so it can be
 * used as a relative next-link path for the following upload call.
 */
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/")
        ? locationHeader.substring(1)
        : locationHeader;
}
/**
 * Download the manifest associated with the given tag or digest.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}

/**
 * Download the manifest associated with the given tag or digest, together with the
 * full REST response. Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    return withContext(context -> downloadManifestWithResponse(tagOrDigest, context));
}
/**
 * Fetches the manifest for {@code tagOrDigest} and only exposes it when the
 * response's digest header or the manifest's tag matches what was requested.
 */
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
    if (tagOrDigest == null) {
        return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
    }
    return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .flatMap(response -> {
            String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            ManifestWrapper wrapper = response.getValue();
            // Reuse the captured wrapper instead of calling response.getValue() again.
            if (Objects.equals(digest, tagOrDigest) || Objects.equals(wrapper.getTag(), tagOrDigest)) {
                OciManifest ociManifest = new OciManifest()
                    .setAnnotations(wrapper.getAnnotations())
                    .setConfig(wrapper.getConfig())
                    .setLayers(wrapper.getLayers())
                    .setSchemaVersion(wrapper.getSchemaVersion());
                Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
                    response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    ociManifest,
                    null);
                return Mono.just(res);
            } else {
                return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
            }
        }).onErrorMap(UtilsImpl::mapException);
}
/**
 * Download the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    return downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}

/**
 * Download the blob\layer associated with the given digest, together with the
 * full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
/**
 * Downloads the blob for {@code digest} and aggregates it into one BinaryData.
 *
 * @param digest The digest identifying the blob.
 * @param context The context to associate with the service calls.
 * @return The REST response carrying the content and the digest header the service returned.
 */
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
// Digest reported by the service for the returned content.
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
// NOTE(review): the entire blob is buffered in memory here; large layers may be costly.
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}

/**
 * Delete the blob associated with the given digest, returning the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(context -> deleteBlobWithResponse(digest, context));
}
/**
 * Performs the blob deletion after validating the digest.
 */
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the manifest associated with the given digest.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}

/**
 * Delete the manifest associated with the given digest, returning the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
/**
 * Performs the manifest deletion after validating the digest.
 */
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // The public overload's javadoc promises a NullPointerException for a null
    // digest; validate here (as deleteBlobWithResponse does) instead of letting
    // the service call fail with a less useful error.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
I am putting all the API feedback in an issue so other languages can also easily follow. [https://github.com/Azure/azure-sdk-for-java/issues/27338](https://github.com/Azure/azure-sdk-for-java/issues/27338) | public String getDocumentation() {
return this.documentation;
} | } | public String getDocumentation() {
return this.documentation;
} | class OciAnnotations {
// Each field is serialized under its full "org.opencontainers.image.*" annotation key.
// NOTE(review): the "https:" references in the comments below look truncated by a
// formatting pass — restore the full spec links.
/*
* Date and time on which the image was built (string, date-time as defined
* by https:
*/
@JsonProperty(value = "org.opencontainers.image.created")
private OffsetDateTime created;
/*
* Contact details of the people or organization responsible for the image.
*/
@JsonProperty(value = "org.opencontainers.image.authors")
private String authors;
/*
* URL to find more information on the image.
*/
@JsonProperty(value = "org.opencontainers.image.url")
private String url;
/*
* URL to get documentation on the image.
*/
@JsonProperty(value = "org.opencontainers.image.documentation")
private String documentation;
/*
* URL to get source code for building the image.
*/
@JsonProperty(value = "org.opencontainers.image.source")
private String source;
/*
* Version of the packaged software. The version MAY match a label or tag
* in the source code repository, may also be Semantic
* versioning-compatible
*/
@JsonProperty(value = "org.opencontainers.image.version")
private String version;
/*
* Source control revision identifier for the packaged software.
*/
@JsonProperty(value = "org.opencontainers.image.revision")
private String revision;
/*
* Name of the distributing entity, organization or individual.
*/
@JsonProperty(value = "org.opencontainers.image.vendor")
private String vendor;
/*
* License(s) under which contained software is distributed as an SPDX
* License Expression.
*/
@JsonProperty(value = "org.opencontainers.image.licenses")
private String licenses;
/*
* Name of the reference for a target.
*/
@JsonProperty(value = "org.opencontainers.image.ref.name")
private String name;
/*
* Human-readable title of the image
*/
@JsonProperty(value = "org.opencontainers.image.title")
private String title;
/*
* Human-readable description of the software packaged in the image
*/
@JsonProperty(value = "org.opencontainers.image.description")
private String description;
/*
* Additional information provided through arbitrary metadata.
*/
// Catch-all map for keys not modeled above; @JsonIgnore keeps it out of normal
// bean serialization, it is exposed through the @JsonAnyGetter/@JsonAnySetter accessors.
@JsonIgnore private Map<String, Object> additionalProperties;
/**
 * Get the created property: date and time on which the image was built.
 *
 * @return the created value.
 */
public OffsetDateTime getCreated() {
    return created;
}

/**
 * Set the created property: date and time on which the image was built.
 *
 * @param created the created value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setCreated(OffsetDateTime created) {
    this.created = created;
    return this;
}

/**
 * Get the authors property: contact details of the people or organization responsible for the image.
 *
 * @return the authors value.
 */
public String getAuthors() {
    return authors;
}

/**
 * Set the authors property: contact details of the people or organization responsible for the image.
 *
 * @param authors the authors value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setAuthors(String authors) {
    this.authors = authors;
    return this;
}

/**
 * Get the url property: URL to find more information on the image.
 *
 * @return the url value.
 */
public String getUrl() {
    return url;
}

/**
 * Set the url property: URL to find more information on the image.
 *
 * @param url the url value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setUrl(String url) {
    this.url = url;
    return this;
}
/**
 * Get the documentation property: URL to get documentation on the image.
 *
 * @return the documentation value.
 */
public String getDocumentation() {
    // Restored: the getter was missing, leaving its javadoc stacked on the setter
    // and callers without read access to the field.
    return this.documentation;
}
/**
 * Set the documentation property: URL to get documentation on the image.
 *
 * @param documentation the documentation value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setDocumentation(String documentation) {
    this.documentation = documentation;
    return this;
}
/**
 * Get the source property: URL to get source code for building the image.
 *
 * @return the source value.
 */
public String getSource() {
    return source;
}

/**
 * Set the source property: URL to get source code for building the image.
 *
 * @param source the source value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setSource(String source) {
    this.source = source;
    return this;
}

/**
 * Get the version property: version of the packaged software. The version MAY match
 * a label or tag in the source code repository, may also be Semantic versioning-compatible.
 *
 * @return the version value.
 */
public String getVersion() {
    return version;
}

/**
 * Set the version property: version of the packaged software. The version MAY match
 * a label or tag in the source code repository, may also be Semantic versioning-compatible.
 *
 * @param version the version value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setVersion(String version) {
    this.version = version;
    return this;
}

/**
 * Get the revision property: source control revision identifier for the packaged software.
 *
 * @return the revision value.
 */
public String getRevision() {
    return revision;
}

/**
 * Set the revision property: source control revision identifier for the packaged software.
 *
 * @param revision the revision value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setRevision(String revision) {
    this.revision = revision;
    return this;
}

/**
 * Get the vendor property: name of the distributing entity, organization or individual.
 *
 * @return the vendor value.
 */
public String getVendor() {
    return vendor;
}

/**
 * Set the vendor property: name of the distributing entity, organization or individual.
 *
 * @param vendor the vendor value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setVendor(String vendor) {
    this.vendor = vendor;
    return this;
}

/**
 * Get the licenses property: license(s) under which contained software is distributed
 * as an SPDX License Expression.
 *
 * @return the licenses value.
 */
public String getLicenses() {
    return licenses;
}

/**
 * Set the licenses property: license(s) under which contained software is distributed
 * as an SPDX License Expression.
 *
 * @param licenses the licenses value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setLicenses(String licenses) {
    this.licenses = licenses;
    return this;
}

/**
 * Get the name property: name of the reference for a target.
 *
 * @return the name value.
 */
public String getName() {
    return name;
}

/**
 * Set the name property: name of the reference for a target.
 *
 * @param name the name value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setName(String name) {
    this.name = name;
    return this;
}

/**
 * Get the title property: human-readable title of the image.
 *
 * @return the title value.
 */
public String getTitle() {
    return title;
}

/**
 * Set the title property: human-readable title of the image.
 *
 * @param title the title value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setTitle(String title) {
    this.title = title;
    return this;
}

/**
 * Get the description property: human-readable description of the software packaged in the image.
 *
 * @return the description value.
 */
public String getDescription() {
    return description;
}

/**
 * Set the description property: human-readable description of the software packaged in the image.
 *
 * @param description the description value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setDescription(String description) {
    this.description = description;
    return this;
}
/**
 * Get the additionalProperties property: additional information provided through arbitrary metadata.
 *
 * @return the additionalProperties value.
 */
@JsonAnyGetter
public Map<String, Object> getAdditionalProperties() {
    return additionalProperties;
}

/**
 * Set the additionalProperties property: additional information provided through arbitrary metadata.
 *
 * @param additionalProperties the additionalProperties value to set.
 * @return the OciAnnotations object itself.
 */
public OciAnnotations setAdditionalProperties(Map<String, Object> additionalProperties) {
    this.additionalProperties = additionalProperties;
    return this;
}

// Collects unknown JSON keys into the map, creating it lazily on first use.
@JsonAnySetter
void setAdditionalProperties(String key, Object value) {
    Map<String, Object> props = additionalProperties;
    if (props == null) {
        props = new HashMap<>();
        additionalProperties = props;
    }
    props.put(key, value);
}
} | class OciAnnotations {
/*
* Date and time on which the image was built (string, date-time as defined
* by https:
*/
@JsonProperty(value = "org.opencontainers.image.created")
private OffsetDateTime created;
/*
* Contact details of the people or organization responsible for the image.
*/
@JsonProperty(value = "org.opencontainers.image.authors")
private String authors;
/*
* URL to find more information on the image.
*/
@JsonProperty(value = "org.opencontainers.image.url")
private String url;
/*
* URL to get documentation on the image.
*/
@JsonProperty(value = "org.opencontainers.image.documentation")
private String documentation;
/*
* URL to get source code for building the image.
*/
@JsonProperty(value = "org.opencontainers.image.source")
private String source;
/*
* Version of the packaged software. The version MAY match a label or tag
* in the source code repository, may also be Semantic
* versioning-compatible
*/
@JsonProperty(value = "org.opencontainers.image.version")
private String version;
/*
* Source control revision identifier for the packaged software.
*/
@JsonProperty(value = "org.opencontainers.image.revision")
private String revision;
/*
* Name of the distributing entity, organization or individual.
*/
@JsonProperty(value = "org.opencontainers.image.vendor")
private String vendor;
/*
* License(s) under which contained software is distributed as an SPDX
* License Expression.
*/
@JsonProperty(value = "org.opencontainers.image.licenses")
private String licenses;
/*
* Name of the reference for a target.
*/
@JsonProperty(value = "org.opencontainers.image.ref.name")
private String name;
/*
* Human-readable title of the image
*/
@JsonProperty(value = "org.opencontainers.image.title")
private String title;
/*
* Human-readable description of the software packaged in the image
*/
@JsonProperty(value = "org.opencontainers.image.description")
private String description;
/*
* Additional information provided through arbitrary metadata.
*/
@JsonIgnore private Map<String, Object> additionalProperties;
/**
* Get the created property: Date and time on which the image was built (string, date-time as defined by
* https:
*
* @return the created value.
*/
public OffsetDateTime getCreated() {
return this.created;
}
/**
* Set the created property: Date and time on which the image was built (string, date-time as defined by
* https:
*
* @param created the created value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setCreated(OffsetDateTime created) {
this.created = created;
return this;
}
/**
* Get the authors property: Contact details of the people or organization responsible for the image.
*
* @return the authors value.
*/
public String getAuthors() {
return this.authors;
}
/**
* Set the authors property: Contact details of the people or organization responsible for the image.
*
* @param authors the authors value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setAuthors(String authors) {
this.authors = authors;
return this;
}
/**
* Get the url property: URL to find more information on the image.
*
* @return the url value.
*/
public String getUrl() {
return this.url;
}
/**
* Set the url property: URL to find more information on the image.
*
* @param url the url value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setUrl(String url) {
this.url = url;
return this;
}
/**
* Get the documentation property: URL to get documentation on the image.
*
* @return the documentation value.
*/
/**
* Set the documentation property: URL to get documentation on the image.
*
* @param documentation the documentation value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setDocumentation(String documentation) {
this.documentation = documentation;
return this;
}
/**
* Get the source property: URL to get source code for building the image.
*
* @return the source value.
*/
public String getSource() {
return this.source;
}
/**
* Set the source property: URL to get source code for building the image.
*
* @param source the source value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setSource(String source) {
this.source = source;
return this;
}
/**
* Get the version property: Version of the packaged software. The version MAY match a label or tag in the source
* code repository, may also be Semantic versioning-compatible.
*
* @return the version value.
*/
public String getVersion() {
return this.version;
}
/**
* Set the version property: Version of the packaged software. The version MAY match a label or tag in the source
* code repository, may also be Semantic versioning-compatible.
*
* @param version the version value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setVersion(String version) {
this.version = version;
return this;
}
/**
* Get the revision property: Source control revision identifier for the packaged software.
*
* @return the revision value.
*/
public String getRevision() {
return this.revision;
}
/**
* Set the revision property: Source control revision identifier for the packaged software.
*
* @param revision the revision value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setRevision(String revision) {
this.revision = revision;
return this;
}
/**
* Get the vendor property: Name of the distributing entity, organization or individual.
*
* @return the vendor value.
*/
public String getVendor() {
return this.vendor;
}
/**
* Set the vendor property: Name of the distributing entity, organization or individual.
*
* @param vendor the vendor value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setVendor(String vendor) {
this.vendor = vendor;
return this;
}
/**
* Get the licenses property: License(s) under which contained software is distributed as an SPDX License
* Expression.
*
* @return the licenses value.
*/
public String getLicenses() {
return this.licenses;
}
/**
* Set the licenses property: License(s) under which contained software is distributed as an SPDX License
* Expression.
*
* @param licenses the licenses value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setLicenses(String licenses) {
this.licenses = licenses;
return this;
}
/**
* Get the name property: Name of the reference for a target.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: Name of the reference for a target.
*
* @param name the name value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setName(String name) {
this.name = name;
return this;
}
/**
* Get the title property: Human-readable title of the image.
*
* @return the title value.
*/
public String getTitle() {
return this.title;
}
/**
* Set the title property: Human-readable title of the image.
*
* @param title the title value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setTitle(String title) {
this.title = title;
return this;
}
/**
* Get the description property: Human-readable description of the software packaged in the image.
*
* @return the description value.
*/
public String getDescription() {
return this.description;
}
/**
* Set the description property: Human-readable description of the software packaged in the image.
*
* @param description the description value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setDescription(String description) {
this.description = description;
return this;
}
/**
* Get the additionalProperties property: Additional information provided through arbitrary metadata.
*
* @return the additionalProperties value.
*/
@JsonAnyGetter
public Map<String, Object> getAdditionalProperties() {
return this.additionalProperties;
}
/**
* Set the additionalProperties property: Additional information provided through arbitrary metadata.
*
* @param additionalProperties the additionalProperties value to set.
* @return the OciAnnotations object itself.
*/
public OciAnnotations setAdditionalProperties(Map<String, Object> additionalProperties) {
this.additionalProperties = additionalProperties;
return this;
}
@JsonAnySetter
void setAdditionalProperties(String key, Object value) {
if (additionalProperties == null) {
additionalProperties = new HashMap<>();
}
additionalProperties.put(key, value);
}
} |
TIL, thx! | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new RuntimeException(exception.getMessage()));
}
} | return monoError(logger, new RuntimeException(exception.getMessage())); | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
dig,
playableFlux.flush(),
playableFlux.getSize(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context))
.flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
AtomicReference<String> digest = new AtomicReference<>();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> {
digest.set(dig);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context);
})
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
Response<DownloadBlobResult> blobResult = new ResponseBase<>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
null,
new DownloadBlobResult()
.setContent(streamResponse.getValue())
.setDigest(resDigest));
return Mono.just(blobResult);
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
static final class BufferedFlux {
int size;
ByteBuffer byteBuffer;
int getSize() {
return this.size;
}
String getDigest() {
return UtilsImpl.computeDigest(byteBuffer);
}
Flux<Void> write(ByteBuffer buffer) {
size += buffer.remaining();
byteBuffer = buffer;
return Flux.empty();
}
Flux<ByteBuffer> flush() {
return Flux.just(byteBuffer);
}
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    // Capture the reactor Context and forward to the context-aware implementation.
    return withContext(context -> downloadManifestWithResponse(tagOrDigest, context));
}
// Context-aware implementation shared by the public overloads. The service payload is
// accepted only when the returned digest header or the wrapper's tag matches the
// identifier the caller asked for; otherwise the pipeline terminates with an error.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
    if (tagOrDigest == null) {
        return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
    }
    return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .flatMap(response -> {
            String receivedDigest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            ManifestWrapper wrapper = response.getValue();
            boolean matchesRequest = Objects.equals(receivedDigest, tagOrDigest)
                || Objects.equals(wrapper.getTag(), tagOrDigest);
            if (!matchesRequest) {
                return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
            }
            // Re-shape the generated wrapper into the public OciManifest model.
            OciManifest manifest = new OciManifest()
                .setAnnotations(wrapper.getAnnotations())
                .setConfig(wrapper.getConfig())
                .setLayers(wrapper.getLayers())
                .setSchemaVersion(wrapper.getSchemaVersion());
            Response<OciManifest> result = new ResponseBase<Void, OciManifest>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                manifest,
                null);
            return Mono.just(result);
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Delegate to the Response-returning overload and unwrap the payload.
    return downloadBlobWithResponse(digest)
        .flatMap(response -> FluxUtil.toMono(response));
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    // Capture the reactor Context and forward to the context-aware implementation.
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
// Context-aware implementation: fetches the blob stream, buffers it into a BinaryData
// (the whole layer is held in memory) and pairs it with the digest reported by the service.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(streamResponse -> {
            String receivedDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            return BinaryData.fromFlux(streamResponse.getValue())
                .map(content -> {
                    Response<DownloadBlobResult> result = new ResponseBase<HttpHeaders, DownloadBlobResult>(
                        streamResponse.getRequest(),
                        streamResponse.getStatusCode(),
                        streamResponse.getHeaders(),
                        new DownloadBlobResult(receivedDigest, content),
                        null);
                    return result;
                });
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    // Delegate to the Response-returning overload and discard the REST envelope.
    return deleteBlobWithResponse(digest)
        .flatMap(response -> FluxUtil.toMono(response));
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    // Capture the reactor Context and forward to the context-aware implementation.
    return withContext(context -> this.deleteBlobWithResponse(digest, context));
}
// Context-aware implementation: validates input, issues the delete call, and
// normalizes both the success envelope and any service error.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(deleteResponse -> UtilsImpl.deleteResponseToSuccess(deleteResponse))
        .onErrorMap(error -> UtilsImpl.mapException(error));
}
/**
* Delete the manifest associated with the given digest.
* We currently only support OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    // Delegate to the Response-returning overload and discard the REST envelope.
    return deleteManifestWithResponse(digest)
        .flatMap(response -> FluxUtil.toMono(response));
}
/**
* Delete the manifest associated with the given digest.
* We currently only support OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    // Capture the reactor Context and forward to the context-aware implementation.
    return withContext(context -> this.deleteManifestWithResponse(digest, context));
}
// Context-aware implementation shared by the public overloads.
// Validates the digest up front so subscribers get the NullPointerException documented
// on the public overloads (mirroring deleteBlobWithResponse) instead of an undefined
// failure from the generated client.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
Yes, the current implementation is to load it into memory. I did the following for 2 reasons (a) Future proofing. (b) Autorest by default creates Rest API argument as Flux<ByteBuffer>, which I believe can be changed. happy to revert if you think it would be better performance wise. However I expect both of them to be similar - no? | Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
BufferedFlux playableFlux = new BufferedFlux();
return data.concatMap(playableFlux::write)
.then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
.flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
dig,
playableFlux.flush(),
playableFlux.getSize(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context))
.flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
} | new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), | return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
/**
 * Creates a blob client scoped to a single repository.
 * Package-private — presumably constructed by the library's client builder; confirm at call site.
 *
 * @param repositoryName The repository that all blob/manifest operations will target.
 * @param httpPipeline The fully configured pipeline used for every service call.
 * @param endpoint The registry endpoint, including scheme and authority.
 * @param version The service API version sent with every request.
 */
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
    this.repositoryName = repositoryName;
    this.endpoint = endpoint;
    // Build the generated client once, then cache its blob and registry sub-clients.
    this.registryImplClient = new AzureContainerRegistryImplBuilder()
        .url(endpoint)
        .pipeline(httpPipeline)
        .apiVersion(version)
        .buildClient();
    this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
    this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
    // Simple accessor for the repository this client is scoped to.
    return repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
    // Simple accessor for the fully qualified registry endpoint.
    return endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    try {
        // Serialize eagerly so a serialization failure surfaces before any network call.
        byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
        return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono);
    } catch (IOException exception) {
        // Preserve the original exception as the cause; the previous code wrapped only
        // the message, discarding the stack trace of the serialization failure.
        return monoError(logger, new RuntimeException(exception.getMessage(), exception));
    }
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    // Reject null input eagerly, then hand the payload to the context-aware
    // Flux<ByteBuffer> implementation.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadManifestWithResponse(data.toFluxByteBuffer(), context));
}
// Context-aware implementation: buffers the manifest payload in memory via BufferedFlux,
// computes its digest once the source flux completes, then issues a single
// createManifest call and re-wraps the service response in the public result model.
// NOTE(review): BufferedFlux.write appears to retain only the most recent ByteBuffer —
// looks like this assumes 'data' emits a single buffer; confirm for multi-buffer sources.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    BufferedFlux playableFlux = new BufferedFlux();
    // concatMap drains the source into the buffer; Mono.defer delays digest computation
    // until the drain has completed.
    return data.concatMap(playableFlux::write)
        .then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
        .flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync(
            repositoryName,
            dig,
            playableFlux.flush(),
            playableFlux.getSize(),
            UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
            context))
        .flatMap(response -> {
            // Surface the service-computed content digest to the caller.
            Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
            return Mono.just(res);
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Run the Response-returning flavor, then unwrap the payload for the caller.
    return withContext(context -> uploadBlobWithResponse(data.toFluxByteBuffer(), context))
        .flatMap(response -> FluxUtil.toMono(response));
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Validate eagerly so subscribers get the documented NullPointerException,
    // consistent with uploadManifestWithResponse(BinaryData), rather than an NPE
    // raised from inside the withContext lambda.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context));
}
// Context-aware implementation of the single-shot blob upload:
//   1. buffer the payload and compute its digest (BufferedFlux),
//   2. start an upload to obtain the upload location,
//   3. send the buffered bytes as one chunk,
//   4. complete the upload using the precomputed digest.
// NOTE(review): BufferedFlux.write appears to retain only the most recent ByteBuffer —
// looks like this assumes 'data' emits a single buffer; confirm for multi-buffer sources.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    BufferedFlux playableFlux = new BufferedFlux();
    // The digest is produced after the source flux completes but is needed again at
    // completeUpload time, hence the AtomicReference hand-off across pipeline stages.
    AtomicReference<String> digest = new AtomicReference<>();
    return data.concatMap(playableFlux::write)
        .then(Mono.defer(() -> Mono.just(playableFlux.getDigest())))
        .flatMap(dig -> {
            digest.set(dig);
            return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context);
        })
        // Each step's next location comes back in a Location-style header; trimNextLink
        // strips the leading '/' the service may include.
        .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context))
        .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
        .flatMap(completeUploadResponse -> {
            // Surface the service-computed content digest to the caller.
            Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
                completeUploadResponse.getStatusCode(),
                completeUploadResponse.getHeaders(),
                new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
                completeUploadResponse.getDeserializedHeaders());
            return Mono.just(res);
        }).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
// Context-aware implementation: fetches the blob stream and surfaces it as the
// response value together with the digest reported by the service.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(streamResponse -> {
            String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            // Pass the result as the response *value* (4th ResponseBase argument).
            // It was previously passed in the deserialized-headers slot with null as
            // the value, so Response.getValue() returned null to callers.
            Response<DownloadBlobResult> blobResult = new ResponseBase<>(
                streamResponse.getRequest(),
                streamResponse.getStatusCode(),
                streamResponse.getHeaders(),
                new DownloadBlobResult()
                    .setContent(streamResponse.getValue())
                    .setDigest(resDigest),
                null);
            return Mono.just(blobResult);
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
 * Accumulates the ByteBuffers emitted by an upstream Flux so the complete payload can
 * be digested and replayed as a single buffer.
 * <p>
 * Not thread-safe; intended for use inside a single sequential reactive pipeline
 * (e.g. {@code data.concatMap(playableFlux::write)}).
 */
static final class BufferedFlux {
    int size;
    ByteBuffer byteBuffer;

    /** Total number of bytes written so far. */
    int getSize() {
        return this.size;
    }

    /** Digest of everything buffered so far. */
    String getDigest() {
        return UtilsImpl.computeDigest(byteBuffer);
    }

    /**
     * Appends {@code buffer} to the accumulated payload. The previous implementation
     * retained only the most recently written buffer (while still summing sizes), so a
     * multi-buffer source produced a wrong digest and uploaded only the final chunk;
     * buffers are now concatenated. The source buffer's position is left untouched.
     */
    Flux<Void> write(ByteBuffer buffer) {
        int count = buffer.remaining();
        if (byteBuffer == null) {
            // Common single-buffer case: keep the original reference, no copy.
            byteBuffer = buffer;
        } else {
            ByteBuffer merged = ByteBuffer.allocate(size + count);
            merged.put(byteBuffer.duplicate());
            merged.put(buffer.duplicate());
            merged.flip();
            byteBuffer = merged;
        }
        size += count;
        return Flux.empty();
    }

    /** Replays the accumulated payload as a single-buffer Flux. */
    Flux<ByteBuffer> flush() {
        return Flux.just(byteBuffer);
    }
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
// Context-aware implementation: digests the in-memory payload, then issues a single
// createManifest call and re-wraps the service response in the public result model.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Digest computed client-side over the exact bytes being sent.
    // NOTE(review): 'data' is also handed to the request below — assumes computeDigest
    // does not advance the buffer's position; confirm in UtilsImpl.
    String digest = UtilsImpl.computeDigest(data);
    return this.registriesImpl.createManifestWithResponseAsync(
        repositoryName,
        digest,
        Flux.just(data),
        data.remaining(),
        UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
        context).map(response -> {
            // Surface the service-computed content digest to the caller.
            Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
            return res;
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Validate eagerly so subscribers get the NullPointerException documented on this
    // method, consistent with the other upload overloads, instead of an NPE raised
    // from inside the withContext lambda.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Context-aware implementation of the single-shot blob upload:
//   1. digest the in-memory payload,
//   2. start an upload to obtain the upload location,
//   3. send the bytes as one chunk,
//   4. complete the upload using the precomputed digest.
// Each step's next location arrives in a Location-style header; trimNextLink strips
// the leading '/' the service may include.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // NOTE(review): 'data' is also handed to the request below — assumes computeDigest
    // does not advance the buffer's position; confirm in UtilsImpl.
    String digest = UtilsImpl.computeDigest(data);
    return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
        .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
        .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
        .flatMap(completeUploadResponse -> {
            // Surface the service-computed content digest to the caller.
            Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
                completeUploadResponse.getStatusCode(),
                completeUploadResponse.getHeaders(),
                new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
                completeUploadResponse.getDeserializedHeaders());
            return Mono.just(res);
        }).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
 * Download the manifest associated with the given tag or digest, with the full HTTP response.
 * Only OCI manifests are currently supported.
 *
 * @see <a href="https://github.com/opencontainers/distribution-spec">OCI Distribution Specification</a>
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    // Capture the subscriber's reactor Context and delegate to the internal overload.
    return withContext(context -> downloadManifestWithResponse(tagOrDigest, context));
}
// Internal implementation: fetches the manifest, verifies it matches the requested
// tag/digest, and re-packages the wire type (ManifestWrapper) as the public OciManifest.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
    if (tagOrDigest == null) {
        return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
    }
    return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .flatMap(response -> {
            // The service echoes the manifest's digest in this header; use it to confirm the
            // returned manifest is the one the caller asked for.
            String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            ManifestWrapper wrapper = response.getValue();
            // The caller may have passed either a digest or a tag, so accept a match on either.
            // (Use the already-extracted 'wrapper' instead of calling response.getValue() again.)
            if (Objects.equals(digest, tagOrDigest) || Objects.equals(wrapper.getTag(), tagOrDigest)) {
                OciManifest ociManifest = new OciManifest()
                    .setAnnotations(wrapper.getAnnotations())
                    .setConfig(wrapper.getConfig())
                    .setLayers(wrapper.getLayers())
                    .setSchemaVersion(wrapper.getSchemaVersion());
                Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
                    response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    ociManifest,
                    null);
                return Mono.just(res);
            } else {
                return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
            }
        }).onErrorMap(UtilsImpl::mapException);
}
/**
 * Download the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Delegate to the *WithResponse variant and unwrap the payload.
    Mono<Response<DownloadBlobResult>> withResponse = downloadBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
 * Download the blob/layer associated with the given digest, with the full HTTP response.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    // Capture the subscriber's reactor Context and delegate to the internal overload.
    return withContext(context -> downloadBlobWithResponse(digest, context));
}
// Internal implementation: streams the blob, collects it into a BinaryData, and wraps the
// payload together with the digest reported by the service in the response header.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
    if (Objects.isNull(digest)) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(streamResponse -> {
            String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            // Buffer the streamed content into memory before handing it to the caller.
            return BinaryData.fromFlux(streamResponse.getValue())
                .map(binaryData -> new ResponseBase<HttpHeaders, DownloadBlobResult>(
                    streamResponse.getRequest(),
                    streamResponse.getStatusCode(),
                    streamResponse.getHeaders(),
                    new DownloadBlobResult(resDigest, binaryData),
                    null));
        }).onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the image associated with the given digest
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    // Delegate to the *WithResponse variant and drop the response envelope.
    Mono<Response<Void>> withResponse = deleteBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
 * Delete the image associated with the given digest, with the full HTTP response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    // Capture the subscriber's reactor Context and delegate to the internal overload.
    return withContext(context -> deleteBlobWithResponse(digest, context));
}
// Internal implementation: issues the delete call and normalizes the delete status codes
// into a success response via UtilsImpl.deleteResponseToSuccess.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (Objects.isNull(digest)) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.blobsImpl
        .deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the manifest associated with the given digest.
 * Only OCI manifests are currently supported.
 *
 * @see <a href="https://github.com/opencontainers/distribution-spec">OCI Distribution Specification</a>
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    // Delegate to the *WithResponse variant and drop the response envelope.
    Mono<Response<Void>> withResponse = deleteManifestWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
 * Delete the manifest associated with the given digest, with the full HTTP response.
 * Only OCI manifests are currently supported.
 *
 * @see <a href="https://github.com/opencontainers/distribution-spec">OCI Distribution Specification</a>
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    // Capture the subscriber's reactor Context and delegate to the internal overload.
    return withContext(context -> deleteManifestWithResponse(digest, context));
}
// Internal implementation: validates input, deletes the manifest, and normalizes the
// delete status codes into a success response.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Guard here as well: the public overload's javadoc promises a NullPointerException for a
    // null digest, and the sibling deleteBlobWithResponse(String, Context) performs this check.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
Random question, is the digest always needed or can it be optional? #Resolved | Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
} | String digest = UtilsImpl.computeDigest(data); | return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Gets the registry's repository on which this client's operations are performed.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}
/**
 * Gets the complete registry endpoint, including the authority.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Upload the Oci manifest to the repository.
 * The upload is done as a single operation.
 *
 * @see <a href="https://github.com/opencontainers/distribution-spec">OCI Distribution Specification</a>
 *
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    // Serialize eagerly so a serialization failure surfaces as an error signal, not a throw.
    final byte[] payload;
    try {
        payload = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
    } catch (IOException exception) {
        return monoError(logger, new UncheckedIOException(exception));
    }
    return withContext(context -> uploadManifestWithResponse(ByteBuffer.wrap(payload), context))
        .flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (Objects.isNull(data)) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Delegate to the ByteBuffer overload, carrying the subscriber's reactor Context.
    return withContext(context -> uploadManifestWithResponse(data.toByteBuffer(), context));
}
// Internal implementation: computes the manifest digest locally, uploads the payload in a
// single call, and wraps the service result in the public UploadManifestResult type.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // The create-manifest call is addressed by digest, so compute it before streaming the body.
    String digest = UtilsImpl.computeDigest(data);
    return this.registriesImpl.createManifestWithResponseAsync(
        repositoryName,
        digest,
        Flux.just(data),
        data.remaining(),
        UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
        context)
        .map(response -> new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
            response.getRequest(),
            response.getStatusCode(),
            response.getHeaders(),
            new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
            response.getDeserializedHeaders()))
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob/layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (Objects.isNull(data)) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> uploadBlobWithResponse(data.toByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Validate up front so a null input yields the documented NullPointerException as an
    // error signal, consistent with uploadBlob and uploadManifestWithResponse; previously a
    // null 'data' would surface as an NPE from data.toByteBuffer() inside the pipeline.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Internal implementation. Uploads a blob in three service calls: start an upload session,
// push the whole payload as a single chunk, then complete the upload using the digest
// computed locally from the payload. Each step's "Location" header (normalized by
// trimNextLink) is the relative URL for the next step.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Compute the digest before the buffer is handed to the request Flux.
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the completion response with the digest the service reports back.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
// Normalizes a "Location" header returned by the upload endpoints: strips a leading '/'
// so the value can be used as a relative path for the next request in the upload sequence.
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
// Internal implementation: fetches the manifest, verifies it matches the requested
// tag/digest, and re-packages the wire type (ManifestWrapper) as the public OciManifest.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
    if (tagOrDigest == null) {
        return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
    }
    return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
        .flatMap(response -> {
            // The service echoes the manifest's digest in this header; use it to confirm the
            // returned manifest is the one the caller asked for.
            String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            ManifestWrapper wrapper = response.getValue();
            // The caller may have passed either a digest or a tag, so accept a match on either.
            // (Use the already-extracted 'wrapper' instead of calling response.getValue() again.)
            if (Objects.equals(digest, tagOrDigest) || Objects.equals(wrapper.getTag(), tagOrDigest)) {
                OciManifest ociManifest = new OciManifest()
                    .setAnnotations(wrapper.getAnnotations())
                    .setConfig(wrapper.getConfig())
                    .setLayers(wrapper.getLayers())
                    .setSchemaVersion(wrapper.getSchemaVersion());
                Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
                    response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    ociManifest,
                    null);
                return Mono.just(res);
            } else {
                return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
            }
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
// Internal implementation: validates input, deletes the manifest, and normalizes the
// delete status codes into a success response.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Guard here as well: the public overload's javadoc promises a NullPointerException for a
    // null digest, and the sibling deleteBlobWithResponse(String, Context) performs this check.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
nit: just use `map` here so you don't need to wrap the value in `Mono.just` #Resolved | Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
} | context).flatMap(response -> { | return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the blob (layer) identified by {@code digest}.
 *
 * @param digest The digest of the image layer to delete.
 * @return A signal that completes when the deletion finishes.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Deletes the blob (layer) identified by {@code digest}, exposing the full REST response.
 *
 * @param digest The digest of the image layer to delete.
 * @return The REST response for the completed deletion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(ctx -> this.deleteBlobWithResponse(digest, ctx));
}
// Core implementation shared by the public delete-blob overloads.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest == null) {
        // Documented contract: a null digest is reported as a NullPointerException.
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the manifest identified by {@code digest}.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest to delete.
 * @return A signal that completes when the deletion finishes.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Deletes the manifest identified by {@code digest}, exposing the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest to delete.
 * @return The REST response for the completed deletion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(ctx -> this.deleteManifestWithResponse(digest, ctx));
}
// Core implementation shared by the public delete-manifest overloads.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Fail fast with the NullPointerException the public javadoc promises, consistent
    // with deleteBlobWithResponse; without this guard the NPE would surface from deep
    // inside the generated client instead of as a mapped Mono error.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
As of now it is always needed. I have a follow-up bug to review with FC to see whether adding a digest here is worth the performance hit as we have no option but to load everything in memory. | Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).flatMap(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
} | String digest = UtilsImpl.computeDigest(data); | return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient; // generated service client wrapper
private final ContainerRegistryBlobsImpl blobsImpl; // generated blob operation group
private final ContainerRegistriesImpl registriesImpl; // generated manifest/registry operation group
private final String endpoint; // complete registry endpoint, including authority
private final String repositoryName; // repository all operations are scoped to
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
// Package-private: instances are created by the corresponding client builder.
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
// Build the generated implementation client once and reuse its operation groups below.
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Gets the name of the repository this client operates on.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}
/**
 * Gets the complete registry endpoint, including the authority.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Upload the Oci manifest to the repository.
 * The upload is done as a single operation; the manifest is serialized fully in memory.
 * @see <a href="https:
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
// Serialize with the client's configured adapter so the wire format matches the service contract.
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
// Serialization failures are surfaced as an unchecked Mono error rather than thrown synchronously.
return monoError(logger, new UncheckedIOException(exception));
}
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
/**
 * Uploads an OCI manifest supplied as {@link BinaryData}, exposing the full REST response.
 * The payload is assumed to be a valid OCI manifest and is read fully into memory.
 *
 * @param data The manifest payload.
 * @return The REST response containing the upload result.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(ctx -> uploadManifestWithResponse(data.toByteBuffer(), ctx));
}
// Core implementation: computes the manifest digest client-side and pushes the
// manifest with the OCI manifest media type.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    String digest = UtilsImpl.computeDigest(data);
    return this.registriesImpl.createManifestWithResponseAsync(
        repositoryName,
        digest,
        Flux.just(data),
        data.remaining(),
        UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
        // map() instead of flatMap(... -> Mono.just(...)): the wrapping is synchronous,
        // so the inner publisher was unnecessary.
        context).map(response -> {
            Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
            return res;
        }).onErrorMap(UtilsImpl::mapException);
}
/**
 * Uploads a blob (layer) to the repository as a single unit.
 * <p>
 * The content is read fully into memory; chunked uploads are not supported yet.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    Mono<Response<UploadBlobResult>> upload =
        withContext(ctx -> uploadBlobWithResponse(data.toByteBuffer(), ctx));
    return upload.flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob (layer) to the repository as a single unit, exposing the full REST response.
 * <p>
 * The content is read fully into memory; chunked uploads are not supported yet.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Guard added: the javadoc promises a NullPointerException for null data, and the
    // sibling uploadManifestWithResponse(BinaryData) already reports it via monoError;
    // previously data.toByteBuffer() would NPE inside the withContext factory instead.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Three-step upload: start an upload session, send the whole payload as one chunk,
// then complete the session with the client-computed digest.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest is computed client-side; presumably validated by the service on completeUpload.
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
// Location headers carry a leading '/' that must be trimmed before reuse.
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the generated response, exposing the digest the service acknowledged.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
// The service returns Location headers with a leading '/'; the generated client
// expects the path without it.
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/") ? locationHeader.substring(1) : locationHeader;
}
/**
 * Downloads the manifest identified by a tag or digest.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
 * Downloads the manifest identified by a tag or digest, exposing the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    return withContext(ctx -> downloadManifestWithResponse(tagOrDigest, ctx));
}
// Fetches the manifest and verifies that the service-reported digest (or the wrapper's
// tag) matches what the caller asked for before handing the manifest back.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
// Digest actually served, taken from the Docker-Content-Digest header.
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
// Accept when the caller passed the digest itself, or a tag matching the wrapper's tag.
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
// A mismatch means the payload cannot be trusted; fail instead of returning it.
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Downloads the blob (layer) identified by {@code digest}.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    Mono<Response<DownloadBlobResult>> withResponse = downloadBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
 * Downloads the blob (layer) identified by {@code digest}, exposing the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(ctx -> downloadBlobWithResponse(digest, ctx));
}
// Core implementation: fetches the blob stream and buffers it fully in memory before
// wrapping it with the digest reported in the Docker-Content-Digest header.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
// NOTE(review): the whole layer is loaded into memory; large layers may be costly.
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the blob (layer) identified by {@code digest}.
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Deletes the blob (layer) identified by {@code digest}, exposing the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(ctx -> this.deleteBlobWithResponse(digest, ctx));
}
// Core implementation shared by the public delete-blob overloads.
// NOTE(review): deleteResponseToSuccess presumably normalizes delete status codes — confirm in UtilsImpl.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the manifest identified by {@code digest}.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Deletes the manifest identified by {@code digest}, exposing the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    return withContext(ctx -> this.deleteManifestWithResponse(digest, ctx));
}
// Core implementation shared by the public delete-manifest overloads.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Fail fast with the NullPointerException the public javadoc promises, consistent
    // with deleteBlobWithResponse; previously a null digest reached the generated client.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient; // generated service client wrapper
private final ContainerRegistryBlobsImpl blobsImpl; // generated blob operation group
private final ContainerRegistriesImpl registriesImpl; // generated manifest/registry operation group
private final String endpoint; // complete registry endpoint, including authority
private final String repositoryName; // repository all operations are scoped to
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
// Package-private: constructed by the corresponding client builder.
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
// One generated implementation client; its operation groups are cached below.
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
 * Gets the name of the repository this client operates on.
 *
 * @return The name of the repository
 */
public String getRepositoryName() {
    return repositoryName;
}
/**
 * Gets the complete registry endpoint, including the authority.
 *
 * @return The registry endpoint including the authority.
 */
public String getEndpoint() {
    return endpoint;
}
/**
 * Uploads an OCI manifest to the repository as a single operation.
 *
 * @param manifest The OciManifest that needs to be uploaded.
 * @return operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code manifest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    // Delegate to the BinaryData overload, which handles serialization and upload.
    BinaryData payload = BinaryData.fromObject(manifest);
    return uploadManifest(payload);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
is null.
*/
/**
 * Uploads an OCI manifest supplied as {@link BinaryData}, exposing the full REST response.
 * The payload is assumed to be a valid OCI manifest and is read fully into memory.
 *
 * @param data The manifest payload.
 * @return The REST response containing the upload result.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(ctx -> uploadManifestWithResponse(data.toByteBuffer(), ctx));
}
// Core implementation: computes the manifest digest client-side and pushes the manifest
// with the OCI manifest media type; the generated response is re-wrapped for the public API.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
// Expose the digest the service acknowledged via Docker-Content-Digest.
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Uploads a blob (layer) to the repository as a single unit.
 * <p>
 * The content is read fully into memory; chunked uploads are not supported yet.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    Mono<Response<UploadBlobResult>> upload =
        withContext(ctx -> uploadBlobWithResponse(data.toByteBuffer(), ctx));
    return upload.flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob (layer) to the repository as a single unit, exposing the full REST response.
 * <p>
 * The content is read fully into memory; chunked uploads are not supported yet.
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Guard added: the javadoc promises a NullPointerException for null data, and the
    // sibling uploadManifestWithResponse(BinaryData) already reports it via monoError;
    // previously data.toByteBuffer() would NPE inside the withContext factory instead.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Three-step upload: start an upload session, send the whole payload as one chunk,
// then complete the session with the client-computed digest.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest is computed client-side; presumably validated by the service on completeUpload.
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
// Location headers carry a leading '/' that must be trimmed before reuse.
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the generated response, exposing the digest the service acknowledged.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
    // Strip the single leading slash the service includes in Location headers.
    if (!locationHeader.startsWith("/")) {
        return locationHeader;
    }
    return locationHeader.substring(1);
}
/**
 * Downloads the manifest identified by a tag or digest.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    return downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
 * Downloads the manifest identified by a tag or digest, exposing the full REST response.
 * Only OCI manifests are currently supported.
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    return withContext(ctx -> downloadManifestWithResponse(tagOrDigest, ctx));
}
// Fetches the manifest and verifies that the service-reported digest (or the wrapper's
// tag) matches what the caller asked for before handing the manifest back.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
// Digest actually served, taken from the Docker-Content-Digest header.
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
// Accept when the caller passed the digest itself, or a tag matching the wrapper's tag.
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
// A mismatch means the payload cannot be trusted; fail instead of returning it.
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Downloads the blob (layer) identified by {@code digest}.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    Mono<Response<DownloadBlobResult>> withResponse = downloadBlobWithResponse(digest);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
 * Downloads the blob (layer) identified by {@code digest}, exposing the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    return withContext(ctx -> downloadBlobWithResponse(digest, ctx));
}
// Core implementation: fetches the blob stream, buffers it fully in memory, and pairs
// it with the digest the service reports in the Docker-Content-Digest header.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
// NOTE(review): the whole layer is loaded into memory; large layers may be costly.
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the blob (layer) identified by {@code digest}.
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    return deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Deletes the blob (layer) identified by {@code digest}, exposing the full REST response.
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    return withContext(ctx -> this.deleteBlobWithResponse(digest, ctx));
}
// Core implementation shared by the public delete-blob overloads.
// NOTE(review): deleteResponseToSuccess presumably normalizes delete status codes — confirm in UtilsImpl.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
 * Deletes the manifest identified by {@code digest}.
 * Only OCI manifests are currently supported.
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    return deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    // Capture the caller's reactor Context, then hand off to the worker overload.
    return withContext(ctx -> deleteManifestWithResponse(digest, ctx));
}
// Package-private worker: deletes the manifest identified by {@code digest}.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Validate eagerly so subscribers get the NullPointerException documented on
    // the public overloads (mirrors deleteBlobWithResponse) instead of an
    // undocumented failure from inside the generated client.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
Ideally there is no limit on the size of an image. However, since we are reading everything in memory, if we are at a point where we need to support 2GB and above we will have to move to streaming APIs instead. | public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
} | return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); | public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
// Package-private constructor — instances are expected to come from the builder.
// Wires up the generated service clients for the blob and registry endpoints.
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
// Build the generated implementation client over the caller-supplied pipeline;
// all operations below fan out through this single client.
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
// Cache the two sub-clients used by blob and manifest operations respectively.
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
    // Simple accessor for the repository this client instance is scoped to.
    return repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
    // Simple accessor for the fully qualified registry endpoint.
    return endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
    if (manifest == null) {
        return monoError(logger, new NullPointerException("'manifest' can't be null."));
    }
    // Serialize through BinaryData and delegate to the BinaryData overload instead
    // of driving the serializer adapter by hand. This removes the checked
    // IOException/UncheckedIOException path entirely and keeps a single code path
    // for manifest uploads.
    return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Run the upload through the context-aware worker, then strip the REST envelope.
    return withContext(context -> uploadManifestWithResponse(data.toByteBuffer(), context))
        .flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // Capture the subscriber's reactor Context and delegate to the ByteBuffer worker.
    return withContext(context -> uploadManifestWithResponse(data.toByteBuffer(), context));
}
// Package-private worker: uploads the serialized manifest bytes as a single call.
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    // The digest is computed client-side from the payload; the service addresses
    // the manifest by this digest.
    String digest = UtilsImpl.computeDigest(data);
    return this.registriesImpl.createManifestWithResponseAsync(
        repositoryName,
        digest,
        Flux.just(data),
        data.remaining(),
        UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
        context).map(response -> {
            // Synchronous 1:1 response transformation — map, not flatMap + Mono.just.
            Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
            return res;
        }).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Validate eagerly so callers get the NullPointerException documented in the
    // Javadoc via monoError, rather than an NPE thrown by data.toByteBuffer()
    // inside the withContext lambda. This matches every sibling overload.
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Package-private worker: uploads the whole blob as one chunk via the service's
// three-step protocol — start upload, send the data, then complete with the digest.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest is computed client-side; the complete-upload call uses it so the
// service can verify the received content.
String digest = UtilsImpl.computeDigest(data);
// Each step's Location header tells us where the next request must go;
// trimNextLink strips the leading slash the generated client cannot handle.
// NOTE(review): completeUpload is called with a null body and length 0, i.e. all
// bytes were already sent in the single chunk above — confirm against service docs.
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Repackage the generated response, exposing the service-confirmed digest.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
// The service returns the next-upload location as an absolute path ("/uploads/...");
// the generated client expects it without the leading slash.
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/")
        ? locationHeader.substring(1)
        : locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
    // Convenience wrapper: fetch the manifest and unwrap the REST envelope.
    return downloadManifestWithResponse(tagOrDigest)
        .flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
    // Bind the subscriber's reactor Context, then delegate to the worker overload.
    return withContext(ctx -> downloadManifestWithResponse(tagOrDigest, ctx));
}
// Package-private worker: fetches the manifest for the given tag or digest and
// validates that the identifiers in the response match what the caller asked for.
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
// Documented NullPointerException surfaced through the reactive error channel.
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
// Digest reported by the service in the Docker content-digest header.
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
// Accept the payload only if the caller-supplied identifier matches either
// the returned digest or the returned tag; anything else is treated as a
// bad/tampered response.
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
// Re-shape the wire-format wrapper into the public OciManifest model.
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
    // Convenience wrapper: download the blob and unwrap the REST envelope.
    return downloadBlobWithResponse(digest)
        .flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
    // Bind the subscriber's reactor Context, then delegate to the worker overload.
    return withContext(ctx -> downloadBlobWithResponse(digest, ctx));
}
// Package-private worker: streams the blob for {@code digest} from the service and
// buffers it fully into a BinaryData before completing the returned Mono.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
// Digest echoed back by the service; exposed on the result so callers can verify.
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
// BinaryData.fromFlux aggregates the whole body in memory — large layers will
// be held entirely in heap until streaming APIs are adopted.
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
    // Thin convenience wrapper: issue the delete and discard the REST envelope.
    return deleteBlobWithResponse(digest)
        .flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
    // Bind the subscriber's reactor Context before delegating to the worker overload.
    return withContext(ctx -> deleteBlobWithResponse(digest, ctx));
}
// Package-private worker: deletes the blob identified by {@code digest} from this repository.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
// Fail fast with the documented NullPointerException, surfaced through monoError
// so subscribers receive it on the error channel rather than synchronously.
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
// deleteResponseToSuccess presumably normalizes the delete status code into a
// success response — TODO confirm against UtilsImpl; mapException translates
// transport/service failures into the Azure exception types documented above.
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
    // Convenience wrapper: perform the delete and drop the REST response wrapper.
    return deleteManifestWithResponse(digest)
        .flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
    // Capture the caller's reactor Context, then hand off to the worker overload.
    return withContext(ctx -> deleteManifestWithResponse(digest, ctx));
}
// Package-private worker: deletes the manifest identified by {@code digest}.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Validate eagerly so subscribers get the NullPointerException documented on
    // the public overloads (mirrors deleteBlobWithResponse) instead of an
    // undocumented failure from inside the generated client.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
```suggestion uploadManifest(BinaryData.fromObject(manifest)); ``` #Resolved | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
} | } | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
This is a bit odd to validate the response from the service. When does the service return a digest that was not requested? #Resolved | Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
} | if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { | return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
Why would the service return a digest that doesn't match the requested digest? #Resolved | Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
} | return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); | return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
Ooh nice, I missed that. Thx. | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
} | } | public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
    // Shared blob-upload implementation: start upload -> send one chunk -> complete.
    Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
        if (data == null) {
            return monoError(logger, new NullPointerException("'data' can't be null."));
        }
        // Digest of the full payload, presented to the service when completing the upload.
        String digest = UtilsImpl.computeDigest(data);
        return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
            // Each step's next location arrives in the Location header; trimNextLink strips a leading '/'.
            .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
            // (null, 0L): no body on completion — presumably because the single chunk above carried all bytes; confirm.
            .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
            .flatMap(completeUploadResponse -> {
                // Re-wrap the generated response, exposing only the service-computed content digest.
                Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
                    completeUploadResponse.getStatusCode(),
                    completeUploadResponse.getHeaders(),
                    new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
                    completeUploadResponse.getDeserializedHeaders());
                return Mono.just(res);
            }).onErrorMap(UtilsImpl::mapException);
    }
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
    // Shared implementation: fetches the manifest and validates it before returning it to the caller.
    Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
        if (tagOrDigest == null) {
            return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
        }
        return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
            .flatMap(response -> {
                // Digest the service reports for the returned manifest.
                String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
                ManifestWrapper wrapper = response.getValue();
                // Client-side check: accept the payload only when the caller's identifier matches
                // either the returned digest or the returned tag (validation the service team asked
                // the SDK to enforce because not all consumers do it themselves).
                if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
                    // Copy only the OCI-manifest fields out of the wire-format wrapper.
                    OciManifest ociManifest = new OciManifest()
                        .setAnnotations(wrapper.getAnnotations())
                        .setConfig(wrapper.getConfig())
                        .setLayers(wrapper.getLayers())
                        .setSchemaVersion(wrapper.getSchemaVersion());
                    Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
                        response.getRequest(),
                        response.getStatusCode(),
                        response.getHeaders(),
                        ociManifest,
                        null);
                    return Mono.just(res);
                } else {
                    // Surface the mismatch as an error instead of handing back unverified content.
                    return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
                }
            }).onErrorMap(UtilsImpl::mapException);
    }
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
    // Shared implementation: streams the blob, buffers it fully in memory, and wraps the result.
    Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
        if (digest == null) {
            return monoError(logger, new NullPointerException("'digest' can't be null."));
        }
        return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
            // Digest reported by the service. NOTE(review): the downloaded bytes are not re-hashed
            // against it here — confirm whether content verification is expected at this layer.
            String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
            // Collect the whole response stream into a single BinaryData before returning.
            return BinaryData.fromFlux(streamResponse.getValue())
                .flatMap(binaryData -> {
                    Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
                        streamResponse.getRequest(),
                        streamResponse.getStatusCode(),
                        streamResponse.getHeaders(),
                        new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
                        null);
                    return Mono.just(response);
                });
        }).onErrorMap(UtilsImpl::mapException);
    }
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
    // Package-private: instances are expected to be created by the corresponding client builder.
    ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
        this.repositoryName = repositoryName;
        this.endpoint = endpoint;
        // Build the generated implementation client once and reuse its operation-group sub-clients.
        this.registryImplClient = new AzureContainerRegistryImplBuilder()
            .url(endpoint)
            .pipeline(httpPipeline)
            .apiVersion(version)
            .buildClient();
        this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
        this.registriesImpl = this.registryImplClient.getContainerRegistries();
    }
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
Yes, this was specifically requested by the service. They want to ensure every Docker consumer validates this, but they have found that customers do not necessarily do it, so they wanted us to put the check in the SDK. | Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
} | if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { | return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
} |
Docker explicitly states that one should check for the digest before consuming the bytes for both security, image corruption etc. Service specifically wanted us to add this check since a lot of customers do not validate. | Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
}).onErrorMap(UtilsImpl::mapException);
} | return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); | return monoError(logger, new NullPointerException("'tagOrDigest' can't be null."));
}
return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)
.flatMap(response -> {
String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
ManifestWrapper wrapper = response.getValue();
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
OciManifest ociManifest = new OciManifest()
.setAnnotations(wrapper.getAnnotations())
.setConfig(wrapper.getConfig())
.setLayers(wrapper.getLayers())
.setSchemaVersion(wrapper.getSchemaVersion());
Response<OciManifest> res = new ResponseBase<Void, OciManifest>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
ociManifest,
null);
return Mono.just(res);
} else {
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
try {
byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON);
return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono);
} catch (IOException exception) {
return monoError(logger, new UncheckedIOException(exception));
}
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Guard before touching 'data': without this check, data.toByteBuffer() throws a raw
    // NullPointerException outside the reactive pipeline instead of surfacing the documented
    // error through the returned Mono. Mirrors uploadManifestWithResponse(BinaryData).
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
// Core blob-upload implementation shared by the public overloads.
// Performs the three-step upload against the service: start -> upload the whole buffer as a
// single chunk -> complete with the client-computed digest. Each step follows the Location
// header from the previous response; trimNextLink strips its leading '/' so the path composes
// with the client's base URL.
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
// Digest is computed client-side over the full buffer and supplied to completeUpload.
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
// Re-wrap the completion response, exposing the digest echoed back by the service.
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
// Normalizes a "Location" header returned by the upload operations: the service may hand back
// a path with a leading '/', which must be dropped before it is reused as a next-link path.
private String trimNextLink(String locationHeader) {
    return locationHeader.startsWith("/")
        ? locationHeader.substring(1)
        : locationHeader;
}
/**
 * Download the manifest associated with the given tag or digest.
 * We currently only support downloading OCI manifests.
 *
 * @see <a href="https:
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
// Value-only convenience overload: delegates to the WithResponse variant and unwraps the body.
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
 * Download the manifest associated with the given tag or digest.
 * We currently only support downloading OCI manifests.
 *
 * @see <a href="https:
 *
 * @param tagOrDigest The tag or digest of the manifest.
 * @return The response for the manifest associated with the given tag or digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code tagOrDigest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
// withContext captures the subscriber's reactor Context for the service call.
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
 * Download the blob associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
// Value-only convenience overload: delegates to the WithResponse variant and unwraps the body.
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Download the blob\layer associated with the given digest.
 *
 * @param digest The digest for the given image layer.
 * @return The image associated with the given digest.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
// Null check is performed in the Context-accepting overload.
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
// Core blob-download implementation. The full blob is buffered in memory via
// BinaryData.fromFlux before the result is emitted.
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
// The digest reported to the caller is taken from the service's response header.
// NOTE(review): the content is not re-hashed client-side against this digest here —
// confirm whether verification is expected to happen elsewhere.
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult().setContent(binaryData).setDigest(resDigest),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
 * Delete the image associated with the given digest
 *
 * @param digest The digest for the given image layer.
 * @return The completion signal.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
// Completion-only convenience overload: delegates and discards the REST response envelope.
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the image associated with the given digest
 *
 * @param digest The digest for the given image layer.
 * @return The REST response for the completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
// Null check is performed in the Context-accepting overload.
return withContext(context -> deleteBlobWithResponse(digest, context));
}
// Core blob-delete implementation: surfaces a null digest as an error signal on the returned
// Mono (logged via monoError), matching the contract documented on the public overload.
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
    if (digest != null) {
        return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
            .flatMap(UtilsImpl::deleteResponseToSuccess)
            .onErrorMap(UtilsImpl::mapException);
    }
    return monoError(logger, new NullPointerException("'digest' can't be null."));
}
/**
 * Delete the manifest associated with the given digest.
 * We currently only support downloading OCI manifests.
 *
 * @see <a href="https:
 *
 * @param digest The digest of the manifest.
 * @return The completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
// Completion-only convenience overload: delegates and discards the REST response envelope.
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
 * Delete the manifest associated with the given digest.
 * We currently only support downloading OCI manifests.
 *
 * @see <a href="https:
 *
 * @param digest The digest of the manifest.
 * @return The REST response for completion.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
// withContext captures the subscriber's reactor Context for the service call.
return withContext(context -> deleteManifestWithResponse(digest, context));
}
// Core manifest-delete implementation.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Guard added for consistency with deleteBlobWithResponse(String, Context): the public
    // overload documents a NullPointerException for a null digest, which must be surfaced
    // through the Mono rather than propagating null into the generated client.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} | class ContainerRegistryBlobAsyncClient {
private final AzureContainerRegistryImpl registryImplClient;
private final ContainerRegistryBlobsImpl blobsImpl;
private final ContainerRegistriesImpl registriesImpl;
private final String endpoint;
private final String repositoryName;
private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class);
ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
this.repositoryName = repositoryName;
this.endpoint = endpoint;
this.registryImplClient = new AzureContainerRegistryImplBuilder()
.url(endpoint)
.pipeline(httpPipeline)
.apiVersion(version)
.buildClient();
this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs();
this.registriesImpl = this.registryImplClient.getContainerRegistries();
}
/**
* This method returns the registry's repository on which operations are being performed.
*
* @return The name of the repository
*/
public String getRepositoryName() {
return this.repositoryName;
}
/**
* This method returns the complete registry endpoint.
*
* @return The registry endpoint including the authority.
*/
public String getEndpoint() {
return this.endpoint;
}
/**
* Upload the Oci manifest to the repository.
* The upload is done as a single operation.
* @see <a href="https:
* @param manifest The OciManifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code manifest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) {
if (manifest == null) {
return monoError(logger, new NullPointerException("'manifest' can't be null."));
}
return uploadManifest(BinaryData.fromObject(manifest));
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
* @param data The manifest that needs to be uploaded.
* @return operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadManifestResult> uploadManifest(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
* Uploads a manifest to the repository.
* The client currently only supports uploading OciManifests to the repository.
* And this operation makes the assumption that the data provided is a valid OCI manifest.
* <p>
* Also, the data is read into memory and then an upload operation is performed as a single operation.
* @see <a href="https:
*
* @param data The manifest that needs to be uploaded.
* @return The rest response containing the operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.registriesImpl.createManifestWithResponseAsync(
repositoryName,
digest,
Flux.just(data),
data.remaining(),
UtilsImpl.OCI_MANIFEST_MEDIA_TYPE,
context).map(response -> {
Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
response.getDeserializedHeaders());
return res;
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Uploads a blob to the repository.
* The client currently uploads the entire blob\layer as a single unit.
* <p>
* The blob is read into memory and then an upload operation is performed as a single operation.
* We currently do not support breaking the layer into multiple chunks and uploading them one at a time
*
* @param data The blob\image content that needs to be uploaded.
* @return The operation result.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code data} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadBlobResult> uploadBlob(BinaryData data) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
}
/**
 * Uploads a blob to the repository.
 * The client currently uploads the entire blob\layer as a single unit.
 * <p>
 * The blob is read into memory and then an upload operation is performed as a single operation.
 * We currently do not support breaking the layer into multiple chunks and uploading them one at a time
 *
 * @param data The blob\image content that needs to be uploaded.
 * @return The rest response containing the operation result.
 * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
 * @throws NullPointerException thrown if the {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) {
    // Guard before touching 'data': without this check, data.toByteBuffer() throws a raw
    // NullPointerException outside the reactive pipeline instead of surfacing the documented
    // error through the returned Mono. Mirrors uploadManifestWithResponse(BinaryData).
    if (data == null) {
        return monoError(logger, new NullPointerException("'data' can't be null."));
    }
    return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context));
}
Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) {
if (data == null) {
return monoError(logger, new NullPointerException("'data' can't be null."));
}
String digest = UtilsImpl.computeDigest(data);
return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context)
.flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context))
.flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context))
.flatMap(completeUploadResponse -> {
Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(),
completeUploadResponse.getStatusCode(),
completeUploadResponse.getHeaders(),
new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()),
completeUploadResponse.getDeserializedHeaders());
return Mono.just(res);
}).onErrorMap(UtilsImpl::mapException);
}
private String trimNextLink(String locationHeader) {
if (locationHeader.startsWith("/")) {
return locationHeader.substring(1);
}
return locationHeader;
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<OciManifest> downloadManifest(String tagOrDigest) {
return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono);
}
/**
* Download the manifest associated with the given tag or digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param tagOrDigest The tag or digest of the manifest.
* @return The response for the manifest associated with the given tag or digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code tagOrDigest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) {
return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context));
}
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) {
if (tagOrDigest == null) {
).onErrorMap(UtilsImpl::mapException);
}
/**
* Download the blob associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DownloadBlobResult> downloadBlob(String digest) {
return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Download the blob\layer associated with the given digest.
*
* @param digest The digest for the given image layer.
* @return The image associated with the given digest.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) {
return withContext(context -> this.downloadBlobWithResponse(digest, context));
}
Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> {
String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME);
return BinaryData.fromFlux(streamResponse.getValue())
.flatMap(binaryData -> {
Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>(
streamResponse.getRequest(),
streamResponse.getStatusCode(),
streamResponse.getHeaders(),
new DownloadBlobResult(resDigest, binaryData),
null);
return Mono.just(response);
});
}).onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The completion signal.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteBlob(String digest) {
return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the image associated with the given digest
*
* @param digest The digest for the given image layer.
* @return The REST response for the completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteBlobWithResponse(String digest) {
return withContext(context -> deleteBlobWithResponse(digest, context));
}
Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) {
if (digest == null) {
return monoError(logger, new NullPointerException("'digest' can't be null."));
}
return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context)
.flatMap(UtilsImpl::deleteResponseToSuccess)
.onErrorMap(UtilsImpl::mapException);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteManifest(String digest) {
return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono);
}
/**
* Delete the manifest associated with the given digest.
* We currently only support downloading OCI manifests.
*
* @see <a href="https:
*
* @param digest The digest of the manifest.
* @return The REST response for completion.
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
* @throws NullPointerException thrown if the {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteManifestWithResponse(String digest) {
return withContext(context -> deleteManifestWithResponse(digest, context));
}
// Core manifest-delete implementation.
Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) {
    // Guard added for consistency with deleteBlobWithResponse(String, Context): the public
    // overload documents a NullPointerException for a null digest, which must be surfaced
    // through the Mono rather than propagating null into the generated client.
    if (digest == null) {
        return monoError(logger, new NullPointerException("'digest' can't be null."));
    }
    return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context)
        .flatMap(UtilsImpl::deleteResponseToSuccess)
        .onErrorMap(UtilsImpl::mapException);
}
} |
Calling this method on every page, whether empty-page diagnostics logging is enabled or not, is undesirable. The enabled-flag check should live inside the method itself, so that when the flag is disabled we never enter the method at all (this avoids creating an unnecessary stack frame and saves a small amount of computation). | public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
return source.filter(documentProducerFeedResponse -> {
if (documentProducerFeedResponse.pageResult.getResults().isEmpty()
&& !ModelBridgeInternal
.getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) {
tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics);
cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics();
logEmptyPageDiagnostics(
cosmosDiagnostics,
this.cosmosQueryRequestOptions,
this.correlatedActivityId,
documentProducerFeedResponse.pageResult.getActivityId());
return false;
}
return true;
}).map(documentProducerFeedResponse -> {
if (!emptyPageQueryMetricsMap.isEmpty()) {
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(currentQueryMetrics, emptyPageQueryMetricsMap);
emptyPageQueryMetricsMap.clear();
}
double charge = tracker.getAndResetCharge();
if (charge > 0) {
return new ValueHolder<>(plusCharge(documentProducerFeedResponse,
charge));
} else {
return new ValueHolder<>(documentProducerFeedResponse);
}
}).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
this.previousPage,
documentProducerFeedResponse);
this.previousPage = documentProducerFeedResponse;
return previousCurrent;
}).skip(1).map(currentNext -> {
DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
String compositeContinuationToken;
String backendContinuationToken = current.pageResult.getContinuationToken();
if (backendContinuationToken == null) {
if (next == null) {
compositeContinuationToken = null;
} else {
CompositeContinuationToken compositeContinuationTokenDom =
new CompositeContinuationToken(null, next.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
} else {
CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
backendContinuationToken,
current.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
DocumentProducer<T>.DocumentProducerFeedResponse page;
page = current;
page = this.addCompositeContinuationToken(page,
compositeContinuationToken);
return page;
}).map(documentProducerFeedResponse -> {
return documentProducerFeedResponse.pageResult;
}).switchIfEmpty(Flux.defer(() -> {
return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(),
headerResponse(tracker.getAndResetCharge()),
emptyPageQueryMetricsMap,
null,
false,
false,
cosmosDiagnostics));
}));
} | logEmptyPageDiagnostics( | Combining previous empty page query metrics with current non empty page query metrics
if (!emptyPageQueryMetricsMap.isEmpty()) {
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(currentQueryMetrics, emptyPageQueryMetricsMap);
emptyPageQueryMetricsMap.clear();
} | class EmptyPagesFilterTransformer<T extends Resource>
implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> {
private final RequestChargeTracker tracker;
private DocumentProducer<T>.DocumentProducerFeedResponse previousPage;
private final CosmosQueryRequestOptions cosmosQueryRequestOptions;
private final UUID correlatedActivityId;
private ConcurrentMap<String, QueryMetrics> emptyPageQueryMetricsMap = new ConcurrentHashMap<>();
private CosmosDiagnostics cosmosDiagnostics;
// Creates the transformer.
// 'tracker' accumulates the request charge of filtered-out empty pages so it can later be
// folded into a surviving page (or the final synthetic empty response); it is required.
// 'options' and 'correlatedActivityId' are stored for empty-page handling and diagnostics.
public EmptyPagesFilterTransformer(RequestChargeTracker tracker, CosmosQueryRequestOptions options, UUID correlatedActivityId) {
if (tracker == null) {
throw new IllegalArgumentException("Request Charge Tracker must not be null.");
}
this.tracker = tracker;
this.previousPage = null;
this.cosmosQueryRequestOptions = options;
this.correlatedActivityId = correlatedActivityId;
}
// Returns the given page with 'charge' added to its REQUEST_CHARGE response header.
// A new FeedResponse is built (headers are copied) and swapped into pageResult in place;
// results, metrics, and diagnostics are carried over unchanged.
private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
double charge) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
double pageCharge = page.getRequestCharge();
pageCharge += charge;
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(pageCharge));
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics());
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
// Returns the given page with its CONTINUATION response header replaced by the supplied
// composite continuation token (may be null, marking the end of the feed). Like plusCharge,
// rebuilds the FeedResponse with copied headers and swaps it into pageResult in place.
private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
String compositeContinuationToken) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
headers.put(HttpConstants.HttpHeaders.CONTINUATION,
compositeContinuationToken);
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics()
);
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
// Builds the single-entry response-header map carrying the aggregated request charge,
// used for the synthetic empty response emitted when every page was filtered out.
private static Map<String, String> headerResponse(double requestCharge) {
    final String charge = String.valueOf(requestCharge);
    return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, charge);
}
@Override
// Transforms the raw per-partition page stream into the user-visible page stream:
// drops empty pages (carrying their charge/metrics forward) and rewrites each page's
// continuation header into a composite continuation token.
// NOTE(review): this operator is stateful (previousPage, tracker, metrics map, diagnostics)
// — presumably it must not be shared across subscriptions; confirm with callers.
public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
// Stage 1: filter out empty pages (unless the request options allow them through).
// The dropped page's charge is accumulated in 'tracker', its query metrics merged into
// emptyPageQueryMetricsMap, and its diagnostics remembered and logged.
return source.filter(documentProducerFeedResponse -> {
if (documentProducerFeedResponse.pageResult.getResults().isEmpty()
&& !ModelBridgeInternal
.getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) {
tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics);
cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics();
logEmptyPageDiagnostics(
cosmosDiagnostics,
this.cosmosQueryRequestOptions,
this.correlatedActivityId,
documentProducerFeedResponse.pageResult.getActivityId());
return false;
}
return true;
// Stage 2: fold any charge accumulated from dropped pages into the next surviving page.
// NOTE(review): emptyPageQueryMetricsMap is only consumed in the switchIfEmpty branch below;
// metrics from dropped pages that precede a non-empty page are not merged into that page here
// — confirm whether that is intended.
}).map(documentProducerFeedResponse -> {
double charge = tracker.getAndResetCharge();
if (charge > 0) {
return new ValueHolder<>(plusCharge(documentProducerFeedResponse,
charge));
} else {
return new ValueHolder<>(documentProducerFeedResponse);
}
// Stage 3: pair each page with its successor. A trailing null sentinel marks the end of the
// stream; 'previousPage' carries state between emissions.
}).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
this.previousPage,
documentProducerFeedResponse);
this.previousPage = documentProducerFeedResponse;
return previousCurrent;
// skip(1) drops the leading (null, firstPage) pair produced by the pairing step.
// Stage 4: compute the composite continuation token from the current page's backend token,
// or — when the backend token is exhausted — from the next page's feed range (null at end).
}).skip(1).map(currentNext -> {
DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
String compositeContinuationToken;
String backendContinuationToken = current.pageResult.getContinuationToken();
if (backendContinuationToken == null) {
if (next == null) {
compositeContinuationToken = null;
} else {
CompositeContinuationToken compositeContinuationTokenDom =
new CompositeContinuationToken(null, next.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
} else {
CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
backendContinuationToken,
current.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
DocumentProducer<T>.DocumentProducerFeedResponse page;
page = current;
page = this.addCompositeContinuationToken(page,
compositeContinuationToken);
return page;
}).map(documentProducerFeedResponse -> {
return documentProducerFeedResponse.pageResult;
// Stage 5: if every page was filtered out, emit one synthetic empty page carrying the
// accumulated charge, the merged empty-page metrics, and the last seen diagnostics.
}).switchIfEmpty(Flux.defer(() -> {
return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(),
headerResponse(tracker.getAndResetCharge()),
emptyPageQueryMetricsMap,
null,
false,
false,
cosmosDiagnostics));
}));
}
} | class EmptyPagesFilterTransformer<T extends Resource>
implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> {
private final RequestChargeTracker tracker;
private DocumentProducer<T>.DocumentProducerFeedResponse previousPage;
private final CosmosQueryRequestOptions cosmosQueryRequestOptions;
private final UUID correlatedActivityId;
private ConcurrentMap<String, QueryMetrics> emptyPageQueryMetricsMap = new ConcurrentHashMap<>();
private CosmosDiagnostics cosmosDiagnostics;
public EmptyPagesFilterTransformer(RequestChargeTracker tracker, CosmosQueryRequestOptions options, UUID correlatedActivityId) {
if (tracker == null) {
throw new IllegalArgumentException("Request Charge Tracker must not be null.");
}
this.tracker = tracker;
this.previousPage = null;
this.cosmosQueryRequestOptions = options;
this.correlatedActivityId = correlatedActivityId;
}
private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
double charge) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
double pageCharge = page.getRequestCharge();
pageCharge += charge;
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(pageCharge));
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics());
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
String compositeContinuationToken) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
headers.put(HttpConstants.HttpHeaders.CONTINUATION,
compositeContinuationToken);
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics()
);
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
private static Map<String, String> headerResponse(
double requestCharge) {
return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(requestCharge));
}
@Override
public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
return source.filter(documentProducerFeedResponse -> {
if (documentProducerFeedResponse.pageResult.getResults().isEmpty()
&& !ModelBridgeInternal
.getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) {
tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics);
cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics();
if (ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.isEmptyPageDiagnosticsEnabled(cosmosQueryRequestOptions)) {
logEmptyPageDiagnostics(
cosmosDiagnostics,
this.correlatedActivityId,
documentProducerFeedResponse.pageResult.getActivityId());
}
return false;
}
return true;
}).map(documentProducerFeedResponse -> {
double charge = tracker.getAndResetCharge();
if (charge > 0) {
return new ValueHolder<>(plusCharge(documentProducerFeedResponse,
charge));
} else {
return new ValueHolder<>(documentProducerFeedResponse);
}
}).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
this.previousPage,
documentProducerFeedResponse);
this.previousPage = documentProducerFeedResponse;
return previousCurrent;
}).skip(1).map(currentNext -> {
DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
String compositeContinuationToken;
String backendContinuationToken = current.pageResult.getContinuationToken();
if (backendContinuationToken == null) {
if (next == null) {
compositeContinuationToken = null;
} else {
CompositeContinuationToken compositeContinuationTokenDom =
new CompositeContinuationToken(null, next.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
} else {
CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
backendContinuationToken,
current.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
DocumentProducer<T>.DocumentProducerFeedResponse page;
page = current;
page = this.addCompositeContinuationToken(page,
compositeContinuationToken);
return page;
}).map(documentProducerFeedResponse -> {
return documentProducerFeedResponse.pageResult;
}).switchIfEmpty(Flux.defer(() -> {
return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(),
headerResponse(tracker.getAndResetCharge()),
emptyPageQueryMetricsMap,
null,
false,
false,
cosmosDiagnostics));
}));
}
} |
discussed offline, updated. | public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
return source.filter(documentProducerFeedResponse -> {
if (documentProducerFeedResponse.pageResult.getResults().isEmpty()
&& !ModelBridgeInternal
.getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) {
tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics);
cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics();
logEmptyPageDiagnostics(
cosmosDiagnostics,
this.cosmosQueryRequestOptions,
this.correlatedActivityId,
documentProducerFeedResponse.pageResult.getActivityId());
return false;
}
return true;
}).map(documentProducerFeedResponse -> {
if (!emptyPageQueryMetricsMap.isEmpty()) {
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(currentQueryMetrics, emptyPageQueryMetricsMap);
emptyPageQueryMetricsMap.clear();
}
double charge = tracker.getAndResetCharge();
if (charge > 0) {
return new ValueHolder<>(plusCharge(documentProducerFeedResponse,
charge));
} else {
return new ValueHolder<>(documentProducerFeedResponse);
}
}).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
this.previousPage,
documentProducerFeedResponse);
this.previousPage = documentProducerFeedResponse;
return previousCurrent;
}).skip(1).map(currentNext -> {
DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
String compositeContinuationToken;
String backendContinuationToken = current.pageResult.getContinuationToken();
if (backendContinuationToken == null) {
if (next == null) {
compositeContinuationToken = null;
} else {
CompositeContinuationToken compositeContinuationTokenDom =
new CompositeContinuationToken(null, next.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
} else {
CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
backendContinuationToken,
current.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
DocumentProducer<T>.DocumentProducerFeedResponse page;
page = current;
page = this.addCompositeContinuationToken(page,
compositeContinuationToken);
return page;
}).map(documentProducerFeedResponse -> {
return documentProducerFeedResponse.pageResult;
}).switchIfEmpty(Flux.defer(() -> {
return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(),
headerResponse(tracker.getAndResetCharge()),
emptyPageQueryMetricsMap,
null,
false,
false,
cosmosDiagnostics));
}));
} | logEmptyPageDiagnostics( | Combining previous empty page query metrics with current non empty page query metrics
if (!emptyPageQueryMetricsMap.isEmpty()) {
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(currentQueryMetrics, emptyPageQueryMetricsMap);
emptyPageQueryMetricsMap.clear();
} | class EmptyPagesFilterTransformer<T extends Resource>
implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> {
private final RequestChargeTracker tracker;
private DocumentProducer<T>.DocumentProducerFeedResponse previousPage;
private final CosmosQueryRequestOptions cosmosQueryRequestOptions;
private final UUID correlatedActivityId;
private ConcurrentMap<String, QueryMetrics> emptyPageQueryMetricsMap = new ConcurrentHashMap<>();
private CosmosDiagnostics cosmosDiagnostics;
public EmptyPagesFilterTransformer(RequestChargeTracker tracker, CosmosQueryRequestOptions options, UUID correlatedActivityId) {
if (tracker == null) {
throw new IllegalArgumentException("Request Charge Tracker must not be null.");
}
this.tracker = tracker;
this.previousPage = null;
this.cosmosQueryRequestOptions = options;
this.correlatedActivityId = correlatedActivityId;
}
private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
double charge) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
double pageCharge = page.getRequestCharge();
pageCharge += charge;
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(pageCharge));
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics());
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
String compositeContinuationToken) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
headers.put(HttpConstants.HttpHeaders.CONTINUATION,
compositeContinuationToken);
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics()
);
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
private static Map<String, String> headerResponse(
double requestCharge) {
return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(requestCharge));
}
@Override
public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
return source.filter(documentProducerFeedResponse -> {
if (documentProducerFeedResponse.pageResult.getResults().isEmpty()
&& !ModelBridgeInternal
.getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) {
tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics);
cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics();
logEmptyPageDiagnostics(
cosmosDiagnostics,
this.cosmosQueryRequestOptions,
this.correlatedActivityId,
documentProducerFeedResponse.pageResult.getActivityId());
return false;
}
return true;
}).map(documentProducerFeedResponse -> {
double charge = tracker.getAndResetCharge();
if (charge > 0) {
return new ValueHolder<>(plusCharge(documentProducerFeedResponse,
charge));
} else {
return new ValueHolder<>(documentProducerFeedResponse);
}
}).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
this.previousPage,
documentProducerFeedResponse);
this.previousPage = documentProducerFeedResponse;
return previousCurrent;
}).skip(1).map(currentNext -> {
DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
String compositeContinuationToken;
String backendContinuationToken = current.pageResult.getContinuationToken();
if (backendContinuationToken == null) {
if (next == null) {
compositeContinuationToken = null;
} else {
CompositeContinuationToken compositeContinuationTokenDom =
new CompositeContinuationToken(null, next.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
} else {
CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
backendContinuationToken,
current.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
DocumentProducer<T>.DocumentProducerFeedResponse page;
page = current;
page = this.addCompositeContinuationToken(page,
compositeContinuationToken);
return page;
}).map(documentProducerFeedResponse -> {
return documentProducerFeedResponse.pageResult;
}).switchIfEmpty(Flux.defer(() -> {
return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(),
headerResponse(tracker.getAndResetCharge()),
emptyPageQueryMetricsMap,
null,
false,
false,
cosmosDiagnostics));
}));
}
} | class EmptyPagesFilterTransformer<T extends Resource>
implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> {
private final RequestChargeTracker tracker;
private DocumentProducer<T>.DocumentProducerFeedResponse previousPage;
private final CosmosQueryRequestOptions cosmosQueryRequestOptions;
private final UUID correlatedActivityId;
private ConcurrentMap<String, QueryMetrics> emptyPageQueryMetricsMap = new ConcurrentHashMap<>();
private CosmosDiagnostics cosmosDiagnostics;
public EmptyPagesFilterTransformer(RequestChargeTracker tracker, CosmosQueryRequestOptions options, UUID correlatedActivityId) {
if (tracker == null) {
throw new IllegalArgumentException("Request Charge Tracker must not be null.");
}
this.tracker = tracker;
this.previousPage = null;
this.cosmosQueryRequestOptions = options;
this.correlatedActivityId = correlatedActivityId;
}
private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
double charge) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
double pageCharge = page.getRequestCharge();
pageCharge += charge;
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(pageCharge));
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics());
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken(
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse,
String compositeContinuationToken) {
FeedResponse<T> page = documentProducerFeedResponse.pageResult;
Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
headers.put(HttpConstants.HttpHeaders.CONTINUATION,
compositeContinuationToken);
FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
headers,
BridgeInternal.queryMetricsFromFeedResponse(page),
ModelBridgeInternal.getQueryPlanDiagnosticsContext(page),
false,
false,
page.getCosmosDiagnostics()
);
documentProducerFeedResponse.pageResult = newPage;
return documentProducerFeedResponse;
}
private static Map<String, String> headerResponse(
double requestCharge) {
return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE,
String.valueOf(requestCharge));
}
@Override
public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
return source.filter(documentProducerFeedResponse -> {
if (documentProducerFeedResponse.pageResult.getResults().isEmpty()
&& !ModelBridgeInternal
.getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) {
tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
ConcurrentMap<String, QueryMetrics> currentQueryMetrics =
BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult);
QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics);
cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics();
if (ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.isEmptyPageDiagnosticsEnabled(cosmosQueryRequestOptions)) {
logEmptyPageDiagnostics(
cosmosDiagnostics,
this.correlatedActivityId,
documentProducerFeedResponse.pageResult.getActivityId());
}
return false;
}
return true;
}).map(documentProducerFeedResponse -> {
double charge = tracker.getAndResetCharge();
if (charge > 0) {
return new ValueHolder<>(plusCharge(documentProducerFeedResponse,
charge));
} else {
return new ValueHolder<>(documentProducerFeedResponse);
}
}).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
this.previousPage,
documentProducerFeedResponse);
this.previousPage = documentProducerFeedResponse;
return previousCurrent;
}).skip(1).map(currentNext -> {
DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
String compositeContinuationToken;
String backendContinuationToken = current.pageResult.getContinuationToken();
if (backendContinuationToken == null) {
if (next == null) {
compositeContinuationToken = null;
} else {
CompositeContinuationToken compositeContinuationTokenDom =
new CompositeContinuationToken(null, next.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
} else {
CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
backendContinuationToken,
current.sourceFeedRange.getRange());
compositeContinuationToken = compositeContinuationTokenDom.toJson();
}
DocumentProducer<T>.DocumentProducerFeedResponse page;
page = current;
page = this.addCompositeContinuationToken(page,
compositeContinuationToken);
return page;
}).map(documentProducerFeedResponse -> {
return documentProducerFeedResponse.pageResult;
}).switchIfEmpty(Flux.defer(() -> {
return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(),
headerResponse(tracker.getAndResetCharge()),
emptyPageQueryMetricsMap,
null,
false,
false,
cosmosDiagnostics));
}));
}
} |
Delete unused field: `mockObjectMapper`. | public void setup() {
MockitoAnnotations.openMocks(this);
try {
when(mockClosableHttpResponse.getStatusLine())
.thenReturn(new BasicStatusLine(new ProtocolVersion("", 0, 0), 200, ""));
when(mockClosableHttpResponse.getEntity()).thenReturn(mockHttpEntity);
when(mockHttpEntity.getContent()).thenReturn(mockInputStream);
} catch (Exception e) {
fail();
}
} | when(mockClosableHttpResponse.getEntity()).thenReturn(mockHttpEntity); | public void setup() {
MockitoAnnotations.openMocks(this);
try {
when(mockClosableHttpResponse.getStatusLine())
.thenReturn(new BasicStatusLine(new ProtocolVersion("", 0, 0), 200, ""));
when(mockClosableHttpResponse.getEntity()).thenReturn(mockHttpEntity);
when(mockHttpEntity.getContent()).thenReturn(mockInputStream);
} catch (Exception e) {
fail();
}
} | class AppConfigurationBootstrapConfigurationTest {
private static final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withPropertyValues(propPair(STORE_ENDPOINT_PROP, TEST_STORE_NAME))
.withConfiguration(AutoConfigurations.of(AppConfigurationBootstrapConfiguration.class));
@Mock
private CloseableHttpResponse mockClosableHttpResponse;
@Mock
HttpEntity mockHttpEntity;
@Mock
InputStream mockInputStream;
@Mock
ObjectMapper mockObjectMapper;
@Mock
ClientStore clientStoreMock;
@Before
@Test
public void iniConnectionStringSystemAssigned() throws Exception {
contextRunner.withPropertyValues(propPair(FAIL_FAST_PROP, "false"))
.run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class));
}
@Test
public void iniConnectionStringUserAssigned() throws Exception {
contextRunner
.withPropertyValues(propPair(FAIL_FAST_PROP, "false"),
propPair("spring.cloud.azure.appconfiguration.managed-identity.client-id", "client-id"))
.run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class));
}
@Test
public void propertySourceLocatorBeanCreated() throws Exception {
contextRunner
.withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false"))
.run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class));
}
@Test
public void clientsBeanCreated() throws Exception {
contextRunner
.withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false"))
.run(context -> assertThat(context).hasSingleBean(ClientStore.class));
}
} | class AppConfigurationBootstrapConfigurationTest {
private static final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withPropertyValues(propPair(STORE_ENDPOINT_PROP, TEST_STORE_NAME))
.withConfiguration(AutoConfigurations.of(AppConfigurationBootstrapConfiguration.class));
@Mock
private CloseableHttpResponse mockClosableHttpResponse;
@Mock
HttpEntity mockHttpEntity;
@Mock
InputStream mockInputStream;
@Before
@Test
public void iniConnectionStringSystemAssigned() {
contextRunner.withPropertyValues(propPair(FAIL_FAST_PROP, "false"))
.run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class));
}
@Test
public void iniConnectionStringUserAssigned() {
contextRunner
.withPropertyValues(propPair(FAIL_FAST_PROP, "false"),
propPair("spring.cloud.azure.appconfiguration.managed-identity.client-id", "client-id"))
.run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class));
}
@Test
public void propertySourceLocatorBeanCreated() {
contextRunner
.withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false"))
.run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class));
}
@Test
public void clientsBeanCreated() {
contextRunner
.withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false"))
.run(context -> assertThat(context).hasSingleBean(ClientStore.class));
}
} |
Do we want to sleep during playback mode? | public void canCreateVirtualMachineWithDeleteOption() throws Exception {
Region region = Region.US_WEST2;
final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
Network network = this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/24")
.withSubnet("default", "10.0.0.0/24")
.create();
VirtualMachine vm1 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
String secondaryNicName = generateRandomResourceName("nic", 10);
Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm2 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm3 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm3.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
} | ResourceManagerUtils.sleep(Duration.ofSeconds(10)); | public void canCreateVirtualMachineWithDeleteOption() throws Exception {
Region region = Region.US_WEST2;
final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
Network network = this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/24")
.withSubnet("default", "10.0.0.0/24")
.create();
VirtualMachine vm1 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
String secondaryNicName = generateRandomResourceName("nic", 10);
Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm2 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm3 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm3.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
} | class VirtualMachineOperationsTests extends ComputeManagementTest {
// Primary resource group name; regenerated per test run in initializeClients.
private String rgName = "";
// Secondary resource group, used by the proximity-placement-group tests.
private String rgName2 = "";
// Default region for most VM tests.
private final Region region = Region.US_EAST;
// Regions used specifically by the proximity-placement-group tests.
private final Region regionProxPlacementGroup = Region.US_WEST;
private final Region regionProxPlacementGroup2 = Region.US_EAST;
// Shared resource names reused across individual tests.
private final String vmName = "javavm";
private final String proxGroupName = "testproxgroup1";
private final String proxGroupName2 = "testproxgroup2";
private final String availabilitySetName = "availset1";
private final String availabilitySetName2 = "availset2";
// All proximity placement groups in these tests use the STANDARD type.
private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
    // Generate fresh, randomized resource-group names for every run so that
    // concurrent or recorded test sessions never collide on group names.
    rgName2 = generateRandomResourceName("javacsmrg2", 15);
    rgName = generateRandomResourceName("javacsmrg", 15);
    super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
    // Tests may set rgName to null to signal that nothing was provisioned
    // (see cannotCreateVirtualMachineSyncPoll).
    if (rgName != null) {
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
    // FIX: rgName2 is provisioned by the proximity-placement-group tests
    // (withNewResourceGroup(rgName2)) but was never deleted here, leaking
    // the second resource group after those tests ran.
    if (rgName2 != null) {
        resourceManager.resourceGroups().beginDeleteByName(rgName2);
    }
}
@Test
public void canCreateVirtualMachineWithNetworking() throws Exception {
// Create a network security group with a single inbound rule: allow TCP 80
// from any source to any destination.
NetworkSecurityGroup nsg =
this
.networkManager
.networkSecurityGroups()
.define("nsg")
.withRegion(region)
.withNewResourceGroup(rgName)
.defineRule("rule1")
.allowInbound()
.fromAnyAddress()
.fromPort(80)
.toAnyAddress()
.toPort(80)
.withProtocol(SecurityRuleProtocol.TCP)
.attach()
.create();
// Define (but do not yet create) a virtual network whose subnet is guarded
// by the NSG above; it is materialized as part of the VM create below.
Creatable<Network> networkDefinition =
this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/28")
.defineSubnet("subnet1")
.withAddressPrefix("10.0.0.0/29")
.withExistingNetworkSecurityGroup(nsg)
.attach();
// Create a Linux VM attached to the network defined above, with a dynamic
// private IP and no public IP.
VirtualMachine vm =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork(networkDefinition)
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.create();
// Walk from the VM to its primary NIC, primary IP configuration, network,
// and subnet, verifying each link in the chain is populated.
NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
Assertions.assertNotNull(primaryNic);
NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
Assertions.assertNotNull(primaryIpConfig);
Assertions.assertNotNull(primaryIpConfig.networkId());
Network network = primaryIpConfig.getNetwork();
Assertions.assertNotNull(primaryIpConfig.subnetName());
Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
Assertions.assertNotNull(subnet);
// The NSG must be reachable both via the subnet...
nsg = subnet.getNetworkSecurityGroup();
Assertions.assertNotNull(nsg);
Assertions.assertEquals("nsg", nsg.name());
Assertions.assertEquals(1, nsg.securityRules().size());
// ...and via the IP configuration itself.
nsg = primaryIpConfig.getNetworkSecurityGroup();
Assertions.assertEquals("nsg", nsg.name());
}
@Test
public void canCreateVirtualMachine() throws Exception {
    // Create a Windows VM with an unmanaged OS disk and a Windows_Server license.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();

    // The VM must be discoverable by listing the resource group...
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());

    // ...and by a point GET.
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());

    // A freshly created VM should be running with a populated instance view.
    PowerState powerState = foundVM.powerState();
    // FIX: JUnit 5 assertEquals takes (expected, actual); the original had them swapped.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // FIX: the original was assertNotNull(instanceView.statuses().size() > 0),
    // which boxes the boolean and always passes; assertTrue actually verifies
    // that at least one status is reported.
    Assertions.assertTrue(instanceView.statuses().size() > 0);

    computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotCreateVirtualMachineSyncPoll() throws Exception {
final String mySqlInstallScript = "https:
final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x(";
Assertions.assertThrows(IllegalStateException.class, () -> {
Accepted<VirtualMachine> acceptedVirtualMachine =
this.computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.defineNewExtension("CustomScriptForLinux")
.withPublisher("Microsoft.OSTCExtensions")
.withType("CustomScriptForLinux")
.withVersion("1.4")
.withMinorVersionAutoUpgrade()
.withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript))
.withPublicSetting("commandToExecute", installCommand)
.attach()
.beginCreate();
});
boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName);
Assertions.assertFalse(dependentResourceCreated);
rgName = null;
}
@Test
public void canCreateVirtualMachineSyncPoll() throws Exception {
    // Fallback polling interval when the service returns no Retry-After header.
    final long defaultDelayInMillis = 10 * 1000;

    Accepted<VirtualMachine> acceptedVirtualMachine = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .beginCreate();

    // beginCreate returns once the request is accepted; provisioning is still in flight.
    VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
    Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());

    // Manually drive the long-running create operation to completion.
    LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
    long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
        ? defaultDelayInMillis
        : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
    while (!pollStatus.isComplete()) {
        ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
        PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
        pollStatus = pollResponse.getStatus();
        delayInMills = pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : pollResponse.getRetryAfter().toMillis();
    }
    Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
    VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
    Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());

    // Delete the VM with the same manual polling loop.
    Accepted<Void> acceptedDelete = computeManager.virtualMachines()
        .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
    pollStatus = acceptedDelete.getActivationResponse().getStatus();
    // FIX: removed the narrowing (int) casts the delete loop had on toMillis();
    // the target variable is a long and the cast could truncate a large
    // Retry-After value. This also matches the create loop above.
    delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null
        ? defaultDelayInMillis
        : acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
    while (!pollStatus.isComplete()) {
        ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
        PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
        pollStatus = pollResponse.getStatus();
        delayInMills = pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : pollResponse.getRetryAfter().toMillis();
    }

    // A GET on the deleted VM must fail with 404 NotFound/ResourceNotFound.
    boolean deleted = false;
    try {
        computeManager.virtualMachines().getById(virtualMachine.id());
    } catch (ManagementException e) {
        if (e.getResponse().getStatusCode() == 404
            && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) {
            deleted = true;
        }
    }
    Assertions.assertTrue(deleted);
}
@Test
public void canCreateUpdatePriorityAndPrice() throws Exception {
    // Create a low-priority VM with eviction policy DEALLOCATE and a max price cap.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
        .withMaxPrice(1000.0)
        .withLicenseType("Windows_Server")
        .create();

    // Locate the VM via list, then via point GET.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
    Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());

    // Max price may only be changed while the VM is deallocated; updating it on a
    // running VM must fail. (FIX: replaced the try/fail/empty-catch pattern with
    // assertThrows, which the file already uses elsewhere.)
    final VirtualMachine runningVM = foundVM;
    Assertions.assertThrows(ManagementException.class,
        () -> runningVM.update().withMaxPrice(1500.0).apply());

    foundVM.deallocate();
    foundVM.update().withMaxPrice(2000.0).apply();
    foundVM.start();
    Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());

    // Priority may move between SPOT and LOW...
    foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
    Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
    foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
    Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());

    // ...but switching to REGULAR is rejected by the service.
    final VirtualMachine lowPriorityVM = foundVM;
    Assertions.assertThrows(ManagementException.class,
        () -> lowPriorityVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply());

    computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
    // Availability set #1, creating proximity placement group #1 in rgName.
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

    // Availability set #2, creating proximity placement group #2 in rgName2
    // (a different region), to attempt the forbidden move later.
    AvailabilitySet setCreated2 =
        computeManager
            .availabilitySets()
            .define(availabilitySetName2)
            .withRegion(regionProxPlacementGroup2)
            .withNewResourceGroup(rgName2)
            .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName2, setCreated2.name());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());

    // Create a VM placed in proximity placement group #1.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();

    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());

    PowerState powerState = foundVM.powerState();
    // FIX: assertEquals takes (expected, actual); the original had them swapped.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // FIX: was assertNotNull(boolean), which always passes after autoboxing.
    Assertions.assertTrue(instanceView.statuses().size() > 0);

    // The placement group must reference both the availability set and the VM.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));

    // Moving a running VM to another proximity placement group must be rejected.
    try {
        foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
        // FIX: the original had no fail() here, so a (wrongly) successful update
        // would let the test pass silently.
        Assertions.fail("Expected proximity placement group update on a running VM to be rejected");
    } catch (ManagementException clEx) {
        Assertions
            .assertTrue(
                clEx
                    .getMessage()
                    .contains(
                        "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                            + " Please stop/deallocate the VM and retry the operation."));
    }

    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
    // Create an availability set together with a new proximity placement group.
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

    // Create a VM inside the same proximity placement group.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();

    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());

    PowerState powerState = foundVM.powerState();
    // FIX: assertEquals takes (expected, actual); the original had them swapped.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // FIX: was assertNotNull(boolean), which always passes after autoboxing.
    Assertions.assertTrue(instanceView.statuses().size() > 0);

    // The placement group must reference both the availability set and the VM.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));

    // Removing the VM from the group does not dissolve the group itself; it must
    // still reference the availability set afterwards.
    VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));

    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;

    // Build `count` VM definitions plus their dependent network/public-IP creatables.
    CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
    List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;

    // Create all VMs (and dependencies) in one batched call.
    CreatedResources<VirtualMachine> createdVirtualMachines =
        computeManager.virtualMachines().create(virtualMachineCreatables);
    // FIX: assertEquals reports both values on failure, unlike assertTrue(a == b).
    Assertions.assertEquals(count, createdVirtualMachines.size());

    // Every created VM must carry one of the expected names and a valid id.
    Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
        Assertions.assertNotNull(virtualMachine.id());
    }

    // Each related network creatable must have materialized with the expected name.
    Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    for (String networkCreatableKey : networkCreatableKeys) {
        Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
        Assertions.assertNotNull(createdNetwork);
        Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
    }

    // Likewise for the related public IP addresses.
    Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    for (String publicIpCreatableKey : publicIpCreatableKeys) {
        PublicIpAddress createdPublicIpAddress =
            (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
        Assertions.assertNotNull(createdPublicIpAddress);
        Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
    }
}
@Test
public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
String vmNamePrefix = "vmz";
String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
String networkNamePrefix = generateRandomResourceName("vnet-", 15);
int count = 5;
// Pre-compute the expected names of every VM, network, and public IP so the
// streaming callback below can validate each emitted resource.
final Set<String> virtualMachineNames = new HashSet<>();
for (int i = 0; i < count; i++) {
virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
}
final Set<String> networkNames = new HashSet<>();
for (int i = 0; i < count; i++) {
networkNames.add(String.format("%s-%d", networkNamePrefix, i));
}
final Set<String> publicIPAddressNames = new HashSet<>();
for (int i = 0; i < count; i++) {
publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
}
final CreatablesInfo creatablesInfo =
prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
// Counts every emission from the reactive pipeline (AtomicInteger because the
// lambda may run on reactor threads).
final AtomicInteger resourceCount = new AtomicInteger(0);
List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
// createAsync streams each resource (VM or dependency) as it is created; verify
// every emitted resource against the expected name sets.
computeManager
.virtualMachines()
.createAsync(virtualMachineCreatables)
.map(
createdResource -> {
if (createdResource instanceof Resource) {
Resource resource = (Resource) createdResource;
System.out.println("Created: " + resource.id());
if (resource instanceof VirtualMachine) {
VirtualMachine virtualMachine = (VirtualMachine) resource;
Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
Assertions.assertNotNull(virtualMachine.id());
} else if (resource instanceof Network) {
Network network = (Network) resource;
Assertions.assertTrue(networkNames.contains(network.name()));
Assertions.assertNotNull(network.id());
} else if (resource instanceof PublicIpAddress) {
PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
Assertions.assertNotNull(publicIPAddress.id());
}
}
resourceCount.incrementAndGet();
return createdResource;
})
.blockLast();
// After the stream completes, every dependent network and public IP must exist.
networkNames.forEach(name -> {
Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
});
publicIPAddressNames.forEach(name -> {
Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
});
// NOTE(review): the pipeline is expected to emit `count` resources and leave one
// storage account plus `count` NICs in the group -- confirm against
// prepareCreatableVirtualMachines if these counts ever change.
Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(count, resourceCount.get());
}
@Test
public void canSetStorageAccountForUnmanagedDisk() {
// Premium storage account that will hold the unmanaged data-disk VHDs.
final String storageName = generateRandomResourceName("st", 14);
StorageAccount storageAccount =
storageManager
.storageAccounts()
.define(storageName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withSku(StorageAccountSkuType.PREMIUM_LRS)
.create();
// Create a VM with two unmanaged data disks (LUNs 2 and 3) explicitly stored
// in the "diskvhds" container of the storage account above.
VirtualMachine virtualMachine =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.defineUnmanagedDataDisk("disk1")
.withNewVhd(100)
.withLun(2)
.storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
.attach()
.defineUnmanagedDataDisk("disk2")
.withNewVhd(100)
.withLun(3)
.storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
.attach()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.create();
// Both disks must be attached at the requested LUNs with VHD URIs assigned.
Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
Assertions.assertNotNull(firstUnmanagedDataDisk);
VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
Assertions.assertNotNull(secondUnmanagedDataDisk);
String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
Assertions.assertNotNull(createdVhdUri1);
Assertions.assertNotNull(createdVhdUri2);
// Delete the VM; the unmanaged VHDs survive in the storage account, so a new
// VM can re-attach the first existing VHD.
computeManager.virtualMachines().deleteById(virtualMachine.id());
virtualMachine =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.create();
// Refresh and confirm the re-attached disk points at the original VHD URI.
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(1, unmanagedDataDisks.size());
firstUnmanagedDataDisk = null;
for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
firstUnmanagedDataDisk = unmanagedDisk;
break;
}
Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
// Attaching the second existing VHD via update must bring the count back to 2.
virtualMachine
.update()
.withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
.apply();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
}
@Test
public void canUpdateTagsOnVM() {
    // A minimal Linux VM is enough to exercise tag updates.
    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

    // Add a single tag and verify it round-trips through the inner model.
    vm.update().withTag("test", "testValue").apply();
    Assertions.assertEquals("testValue", vm.innerModel().tags().get("test"));

    // Replace the tag map wholesale and verify the new entry is present.
    Map<String, String> replacementTags = new HashMap<String, String>();
    replacementTags.put("testTag", "testValue");
    vm.update().withTags(replacementTags).apply();
    Assertions.assertEquals(replacementTags.get("testTag"), vm.innerModel().tags().get("testTag"));
}
@Test
public void canRunScriptOnVM() {
    // Provision a basic Ubuntu VM to run shell commands against.
    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .create();

    // Install git through the run-command API, with no input parameters.
    List<String> commands = new ArrayList<>();
    commands.add("sudo apt-get update");
    commands.add("sudo apt-get install -y git");
    RunCommandResult result = vm.runShellScript(commands, new ArrayList<RunCommandInputParameter>());

    // The service must return a non-empty status/result list.
    Assertions.assertNotNull(result);
    Assertions.assertNotNull(result.value());
    Assertions.assertTrue(result.value().size() > 0);
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canPerformSimulateEvictionOnSpotVirtualMachine() {
// Create a spot VM whose eviction policy deallocates (rather than deletes) it.
VirtualMachine virtualMachine = computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("firstuser")
.withSsh(sshPublicKey())
.withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
.create();
// While running, the OS disk is fully reported and attached.
Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertNotNull(disk);
Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
// Trigger a simulated spot eviction, then poll (up to ~30 minutes, every 5)
// until the VM reaches the DEALLOCATED power state.
virtualMachine.simulateEviction();
boolean deallocated = false;
int pollIntervalInMinutes = 5;
for (int i = 0; i < 30; i += pollIntervalInMinutes) {
ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
deallocated = true;
break;
}
}
Assertions.assertTrue(deallocated);
// After eviction/deallocation the OS disk details are no longer reported on the
// VM and the disk itself transitions to the RESERVED state.
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
Assertions.assertNotNull(virtualMachine);
Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertEquals(0, virtualMachine.osDiskSize());
disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}
@Test
public void canForceDeleteVirtualMachine() {
    // Provision a Windows VM in the canary region where force-delete is enabled.
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .create();

    VirtualMachine vm = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(vm);
    Assertions.assertEquals(Region.fromName("eastus2euap"), vm.region());
    String nicId = vm.primaryNetworkInterfaceId();

    // Force-delete the VM (second argument enables forced deletion).
    computeManager.virtualMachines().deleteById(vm.id(), true);

    // A point GET on the deleted VM must 404; treat that as success.
    try {
        vm = computeManager.virtualMachines().getById(vm.id());
    } catch (ManagementException error) {
        vm = null;
        Assertions.assertEquals(404, error.getResponse().getStatusCode());
    }
    Assertions.assertNull(vm);

    // Force delete removes only the VM; its network interface must survive.
    NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
    Assertions.assertNotNull(nic);
}
// FIX: @Test was declared twice on the following method. JUnit 5's @Test is not
// a @Repeatable annotation, so the duplicate is a compile error; keep one.
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
Region region = Region.US_WEST2;
Network network = this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/24")
.withSubnet("default", "10.0.0.0/24")
.create();
VirtualMachine vm1 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
vm1.update()
.withNewDataDisk(10)
.apply();
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.disks().deleteById(disk.id());
VirtualMachine vm2 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
vm2.update()
.withNewDataDisk(10)
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.apply();
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}
@Test
public void canHibernateVirtualMachine() {
// Hibernation must be opted into at create time via enableHibernation();
// "eastus2euap" is a canary region (TODO confirm feature availability there).
VirtualMachine vm = computeManager.virtualMachines()
.define(vmName)
.withRegion("eastus2euap")
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
.enableHibernation()
.create();
Assertions.assertTrue(vm.isHibernationEnabled());
// deallocate(true) requests hibernation rather than a plain deallocate.
vm.deallocate(true);
// The instance view should then report the hibernated state code.
InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
.filter(status -> "HibernationState/Hibernated".equals(status.code()))
.findFirst().orElse(null);
Assertions.assertNotNull(hibernationStatus);
// Hibernation is toggled off via update while the VM is deallocated.
vm.start();
vm.deallocate();
vm.update()
.disableHibernation()
.apply();
Assertions.assertFalse(vm.isHibernationEnabled());
}
// Builds (without creating) the creatables for vmCount Linux VMs, each with
// its own new network and public IP, all sharing one resource group and one
// storage account (used for unmanaged disks). Returns the VM creatables plus
// the creatable keys needed to look up the related network/IP resources after
// a batch create.
private CreatablesInfo prepareCreatableVirtualMachines(
Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
Creatable<ResourceGroup> resourceGroupCreatable =
resourceManager.resourceGroups().define(rgName).withRegion(region);
// One storage account shared by every VM in the batch.
Creatable<StorageAccount> storageAccountCreatable =
storageManager
.storageAccounts()
.define(generateRandomResourceName("stg", 20))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable);
List<String> networkCreatableKeys = new ArrayList<>();
List<String> publicIpCreatableKeys = new ArrayList<>();
List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
for (int i = 0; i < vmCount; i++) {
// Per-VM network; its key is recorded so the created resource can be
// retrieved from the batch result later.
Creatable<Network> networkCreatable =
networkManager
.networks()
.define(String.format("%s-%d", networkNamePrefix, i))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable)
.withAddressSpace("10.0.0.0/28");
networkCreatableKeys.add(networkCreatable.key());
// Per-VM public IP address, key recorded for the same reason.
Creatable<PublicIpAddress> publicIPAddressCreatable =
networkManager
.publicIpAddresses()
.define(String.format("%s-%d", publicIpNamePrefix, i))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable);
publicIpCreatableKeys.add(publicIPAddressCreatable.key());
// The VM creatable wires in the network, public IP and shared storage
// account as dependent creatables -- they are created together.
Creatable<VirtualMachine> virtualMachineCreatable =
computeManager
.virtualMachines()
.define(String.format("%s-%d", vmNamePrefix, i))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable)
.withNewPrimaryNetwork(networkCreatable)
.withPrimaryPrivateIPAddressDynamic()
.withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("tirekicker")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.withNewStorageAccount(storageAccountCreatable);
virtualMachineCreatables.add(virtualMachineCreatable);
}
CreatablesInfo creatablesInfo = new CreatablesInfo();
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
creatablesInfo.networkCreatableKeys = networkCreatableKeys;
creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
return creatablesInfo;
}
class CreatablesInfo {
private List<Creatable<VirtualMachine>> virtualMachineCreatables;
List<String> networkCreatableKeys;
List<String> publicIpCreatableKeys;
}
} | class VirtualMachineOperationsTests extends ComputeManagementTest {
private String rgName = "";
private String rgName2 = "";
private final Region region = Region.US_EAST;
private final Region regionProxPlacementGroup = Region.US_WEST;
private final Region regionProxPlacementGroup2 = Region.US_EAST;
private final String vmName = "javavm";
private final String proxGroupName = "testproxgroup1";
private final String proxGroupName2 = "testproxgroup2";
private final String availabilitySetName = "availset1";
private final String availabilitySetName2 = "availset2";
private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
// Generate fresh resource-group names before the managers are initialized,
// so each test run operates in its own groups.
rgName = generateRandomResourceName("javacsmrg", 15);
rgName2 = generateRandomResourceName("javacsmrg2", 15);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
    // Fire-and-forget deletion of the test resource groups.
    if (rgName != null) {
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
    // (fix) rgName2 is populated in initializeClients and used by tests such
    // as cannotUpdateProximityPlacementGroupForVirtualMachine, but the
    // original never deleted it, leaking that resource group.
    if (rgName2 != null) {
        resourceManager.resourceGroups().beginDeleteByName(rgName2);
    }
}
@Test
public void canCreateVirtualMachineWithNetworking() throws Exception {
// NSG with a single inbound rule allowing TCP port 80 from anywhere.
NetworkSecurityGroup nsg =
this
.networkManager
.networkSecurityGroups()
.define("nsg")
.withRegion(region)
.withNewResourceGroup(rgName)
.defineRule("rule1")
.allowInbound()
.fromAnyAddress()
.fromPort(80)
.toAnyAddress()
.toPort(80)
.withProtocol(SecurityRuleProtocol.TCP)
.attach()
.create();
// Network whose subnet is associated with the NSG above; left as a
// creatable so it is created together with the VM.
Creatable<Network> networkDefinition =
this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/28")
.defineSubnet("subnet1")
.withAddressPrefix("10.0.0.0/29")
.withExistingNetworkSecurityGroup(nsg)
.attach();
// Linux VM attached to that network, with no public IP.
VirtualMachine vm =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork(networkDefinition)
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.create();
// Walk from the VM to its primary NIC / IP configuration and verify the
// network, subnet and NSG associations made above are all reachable.
NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
Assertions.assertNotNull(primaryNic);
NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
Assertions.assertNotNull(primaryIpConfig);
Assertions.assertNotNull(primaryIpConfig.networkId());
Network network = primaryIpConfig.getNetwork();
Assertions.assertNotNull(primaryIpConfig.subnetName());
Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
Assertions.assertNotNull(subnet);
nsg = subnet.getNetworkSecurityGroup();
Assertions.assertNotNull(nsg);
Assertions.assertEquals("nsg", nsg.name());
Assertions.assertEquals(1, nsg.securityRules().size());
// The NSG should also be reachable from the IP configuration itself.
nsg = primaryIpConfig.getNetworkSecurityGroup();
Assertions.assertEquals("nsg", nsg.name());
}
@Test
public void canCreateVirtualMachine() throws Exception {
    // Create a Windows VM with an unmanaged OS disk and a Windows_Server
    // (hybrid-benefit) license type.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    // The VM should be discoverable by listing the resource group...
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    // ...and by a direct GET.
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    // (fix) expected value goes first in JUnit's assertEquals.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // (fix) the original called assertNotNull on a boolean expression, which
    // always passes (the autoboxed Boolean is never null); assertTrue checks
    // the intended condition.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotCreateVirtualMachineSyncPoll() throws Exception {
final String mySqlInstallScript = "https:
final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x(";
Assertions.assertThrows(IllegalStateException.class, () -> {
Accepted<VirtualMachine> acceptedVirtualMachine =
this.computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.defineNewExtension("CustomScriptForLinux")
.withPublisher("Microsoft.OSTCExtensions")
.withType("CustomScriptForLinux")
.withVersion("1.4")
.withMinorVersionAutoUpgrade()
.withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript))
.withPublicSetting("commandToExecute", installCommand)
.attach()
.beginCreate();
});
boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName);
Assertions.assertFalse(dependentResourceCreated);
rgName = null;
}
@Test
public void canCreateVirtualMachineSyncPoll() throws Exception {
    final long defaultDelayInMillis = 10 * 1000;
    // Kick off the create as a long-running operation without waiting.
    Accepted<VirtualMachine> acceptedVirtualMachine = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .beginCreate();
    // Right after activation the VM must not yet be fully provisioned.
    VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
    Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());
    // Poll manually, honoring the service-suggested Retry-After when present.
    LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
    long delayInMillis = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
        ? defaultDelayInMillis
        : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
    while (!pollStatus.isComplete()) {
        ResourceManagerUtils.sleep(Duration.ofMillis(delayInMillis));
        PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
        pollStatus = pollResponse.getStatus();
        delayInMillis = pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : pollResponse.getRetryAfter().toMillis();
    }
    Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
    VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
    Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());
    // Delete through the same begin/poll pattern.
    Accepted<Void> acceptedDelete = computeManager.virtualMachines()
        .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
    pollStatus = acceptedDelete.getActivationResponse().getStatus();
    // (fix) the original narrowed toMillis() with an (int) cast before
    // storing it in a long -- an unnecessary cast that could overflow for a
    // large Retry-After value; the casts have been removed.
    delayInMillis = acceptedDelete.getActivationResponse().getRetryAfter() == null
        ? defaultDelayInMillis
        : acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
    while (!pollStatus.isComplete()) {
        ResourceManagerUtils.sleep(Duration.ofMillis(delayInMillis));
        PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
        pollStatus = pollResponse.getStatus();
        delayInMillis = pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : pollResponse.getRetryAfter().toMillis();
    }
    // The VM must now be gone: a GET should fail with 404 NotFound.
    boolean deleted = false;
    try {
        computeManager.virtualMachines().getById(virtualMachine.id());
    } catch (ManagementException e) {
        if (e.getResponse().getStatusCode() == 404
            && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) {
            deleted = true;
        }
    }
    Assertions.assertTrue(deleted);
}
@Test
public void canCreateUpdatePriorityAndPrice() throws Exception {
    // Create a low-priority VM with DEALLOCATE eviction policy and a max
    // price cap of 1000.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
        .withMaxPrice(1000.0)
        .withLicenseType("Windows_Server")
        .create();
    // The VM should be discoverable by listing and by direct GET.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
    Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
    // Changing max price while the VM is running is rejected by the service.
    // (fix) the original used try { ...; fail(); } catch (ManagementException) {}
    // with an empty catch; assertThrows states the intent directly and cannot
    // swallow unrelated failures.
    final VirtualMachine runningVm = foundVM;
    Assertions.assertThrows(ManagementException.class,
        () -> runningVm.update().withMaxPrice(1500.0).apply());
    // After deallocation the price can be updated.
    foundVM.deallocate();
    foundVM.update().withMaxPrice(2000.0).apply();
    foundVM.start();
    Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
    // Switching between SPOT and LOW priorities is allowed.
    foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
    Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
    foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
    Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
    // Moving a low-priority VM back to REGULAR is rejected by the service.
    final VirtualMachine lowPriorityVm = foundVM;
    Assertions.assertThrows(ManagementException.class,
        () -> lowPriorityVm.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply());
    computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
    // First availability set, which creates a new proximity placement group.
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // Second availability set / placement group in a different region and
    // resource group; target of the disallowed PPG move below.
    AvailabilitySet setCreated2 =
        computeManager
            .availabilitySets()
            .define(availabilitySetName2)
            .withRegion(regionProxPlacementGroup2)
            .withNewResourceGroup(rgName2)
            .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName2, setCreated2.name());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());
    // VM created inside the first placement group.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    // (fix) expected value goes first in JUnit's assertEquals.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // (fix) assertNotNull on a boolean expression always passes (the boxed
    // Boolean is never null); assertTrue checks the intended condition.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    // The VM and the availability set should both be members of the PPG.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    // (fix) the original wrapped the update in try/catch and asserted only
    // inside the catch block -- if the service unexpectedly allowed the
    // update, the test passed silently. assertThrows makes the exception
    // mandatory.
    final VirtualMachine runningVm = foundVM;
    ManagementException clEx = Assertions.assertThrows(ManagementException.class,
        () -> runningVm.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply());
    Assertions
        .assertTrue(
            clEx
                .getMessage()
                .contains(
                    "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                        + " Please stop/deallocate the VM and retry the operation."));
    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
    // Availability set that creates a new proximity placement group (PPG).
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // VM created inside the same PPG.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    // (fix) expected value goes first in JUnit's assertEquals.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // (fix) assertNotNull on a boolean expression always passes (the boxed
    // Boolean is never null); assertTrue checks the intended condition.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    // The VM and the availability set should both be members of the PPG.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    // Removing the PPG from the VM leaves the availability set's membership
    // in the group intact.
    VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    // Prepare creatables for the VMs and their per-VM networks / public IPs.
    CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
    List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;
    // Create everything in one parallel batch.
    CreatedResources<VirtualMachine> createdVirtualMachines =
        computeManager.virtualMachines().create(virtualMachineCreatables);
    // (fix) assertEquals reports both values on failure, unlike
    // assertTrue(a == b) which only reports "expected true".
    Assertions.assertEquals(count, createdVirtualMachines.size());
    // Each created VM must carry one of the expected generated names.
    Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
        Assertions.assertNotNull(virtualMachine.id());
    }
    // Every related network must be retrievable from the batch result by key.
    Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    for (String networkCreatableKey : networkCreatableKeys) {
        Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
        Assertions.assertNotNull(createdNetwork);
        Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
    }
    // Same for the related public IP addresses.
    Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    for (String publicIpCreatableKey : publicIpCreatableKeys) {
        PublicIpAddress createdPublicIpAddress =
            (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
        Assertions.assertNotNull(createdPublicIpAddress);
        Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
    }
}
@Test
public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
String vmNamePrefix = "vmz";
String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
String networkNamePrefix = generateRandomResourceName("vnet-", 15);
int count = 5;
// Pre-compute the names each created resource is expected to carry.
final Set<String> virtualMachineNames = new HashSet<>();
for (int i = 0; i < count; i++) {
virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
}
final Set<String> networkNames = new HashSet<>();
for (int i = 0; i < count; i++) {
networkNames.add(String.format("%s-%d", networkNamePrefix, i));
}
final Set<String> publicIPAddressNames = new HashSet<>();
for (int i = 0; i < count; i++) {
publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
}
final CreatablesInfo creatablesInfo =
prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
final AtomicInteger resourceCount = new AtomicInteger(0);
List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
// createAsync streams resources as they are created; verify every emission
// carries one of the expected names, and count the emissions.
computeManager
.virtualMachines()
.createAsync(virtualMachineCreatables)
.map(
createdResource -> {
if (createdResource instanceof Resource) {
Resource resource = (Resource) createdResource;
System.out.println("Created: " + resource.id());
if (resource instanceof VirtualMachine) {
VirtualMachine virtualMachine = (VirtualMachine) resource;
Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
Assertions.assertNotNull(virtualMachine.id());
} else if (resource instanceof Network) {
Network network = (Network) resource;
Assertions.assertTrue(networkNames.contains(network.name()));
Assertions.assertNotNull(network.id());
} else if (resource instanceof PublicIpAddress) {
PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
Assertions.assertNotNull(publicIPAddress.id());
}
}
resourceCount.incrementAndGet();
return createdResource;
})
.blockLast();
// All networks and public IPs should exist once the stream completes.
networkNames.forEach(name -> {
Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
});
publicIPAddressNames.forEach(name -> {
Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
});
// One shared storage account and one NIC per VM are expected.
Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
// NOTE(review): resourceCount counts every stream emission yet is compared
// to the VM count -- presumably createAsync emits only the top-level VMs
// here; confirm against the createAsync contract.
Assertions.assertEquals(count, resourceCount.get());
}
@Test
public void canSetStorageAccountForUnmanagedDisk() {
final String storageName = generateRandomResourceName("st", 14);
// Premium storage account to hold the unmanaged data-disk VHDs.
StorageAccount storageAccount =
storageManager
.storageAccounts()
.define(storageName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withSku(StorageAccountSkuType.PREMIUM_LRS)
.create();
// VM with two unmanaged data disks explicitly stored in the account above
// (container "diskvhds", fixed blob names) at LUNs 2 and 3.
VirtualMachine virtualMachine =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.defineUnmanagedDataDisk("disk1")
.withNewVhd(100)
.withLun(2)
.storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
.attach()
.defineUnmanagedDataDisk("disk2")
.withNewVhd(100)
.withLun(3)
.storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
.attach()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.create();
// Both disks should be reported at their configured LUNs with VHD URIs.
Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
Assertions.assertNotNull(firstUnmanagedDataDisk);
VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
Assertions.assertNotNull(secondUnmanagedDataDisk);
String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
Assertions.assertNotNull(createdVhdUri1);
Assertions.assertNotNull(createdVhdUri2);
// Deleting the VM leaves the VHD blobs behind; they are re-attached as
// existing unmanaged disks below.
computeManager.virtualMachines().deleteById(virtualMachine.id());
virtualMachine =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.create();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(1, unmanagedDataDisks.size());
firstUnmanagedDataDisk = null;
for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
firstUnmanagedDataDisk = unmanagedDisk;
break;
}
// The re-attached disk should point at the first VHD created earlier.
Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
// The second VHD can also be attached through an update.
virtualMachine
.update()
.withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
.apply();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
}
@Test
public void canUpdateTagsOnVM() {
    // Provision a minimal Linux VM to exercise tag updates against.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

    // Add a single tag and verify it round-trips through the service.
    virtualMachine.update().withTag("test", "testValue").apply();
    Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));

    // Replace the tag collection wholesale and verify the new map took effect.
    Map<String, String> replacementTags = new HashMap<>();
    replacementTags.put("testTag", "testValue");
    virtualMachine.update().withTags(replacementTags).apply();
    Assertions.assertEquals(replacementTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
}
@Test
public void canRunScriptOnVM() {
    // Create a plain Ubuntu VM on which the shell script will be executed.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .create();

    // Script body: refresh the package index, then install git non-interactively.
    List<String> script = new ArrayList<>();
    script.add("sudo apt-get update");
    script.add("sudo apt-get install -y git");

    RunCommandResult result =
        virtualMachine.runShellScript(script, new ArrayList<RunCommandInputParameter>());

    // The run-command API must report at least one status/output entry.
    Assertions.assertNotNull(result);
    Assertions.assertNotNull(result.value());
    Assertions.assertTrue(result.value().size() > 0);
}
// Verifies simulateEviction() on a Spot VM: the VM should eventually reach
// DEALLOCATED power state, its disk profile should be cleared, and the OS disk
// should end up in the RESERVED state (kept, no longer attached).
// Live-only test: eviction is polled for up to ~30 minutes.
@Test
@DoNotRecord(skipInPlayback = true)
public void canPerformSimulateEvictionOnSpotVirtualMachine() {
VirtualMachine virtualMachine = computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("firstuser")
.withSsh(sshPublicKey())
.withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.create();
// Before eviction: the OS disk profile is populated and the disk is ATTACHED.
Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertNotNull(disk);
Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
// Ask the platform to evict the Spot VM, then poll (up to 30 minutes in
// 5-minute steps) until the VM reports DEALLOCATED.
virtualMachine.simulateEviction();
boolean deallocated = false;
int pollIntervalInMinutes = 5;
for (int i = 0; i < 30; i += pollIntervalInMinutes) {
ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
deallocated = true;
break;
}
}
Assertions.assertTrue(deallocated);
// After eviction the VM resource still exists but its disk profile is
// cleared, and the OS disk has transitioned to RESERVED.
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
Assertions.assertNotNull(virtualMachine);
Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertEquals(0, virtualMachine.osDiskSize());
disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}
@Test
public void canForceDeleteVirtualMachine() {
    // Create a Windows VM whose primary NIC will be checked after force-deletion.
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .create();

    VirtualMachine createdVm = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(createdVm);
    Assertions.assertEquals(Region.fromName("eastus2euap"), createdVm.region());
    String primaryNicId = createdVm.primaryNetworkInterfaceId();

    // Force delete the VM (second argument is the forceDeletion flag).
    computeManager.virtualMachines().deleteById(createdVm.id(), true);

    // The VM must now be gone: a point GET is expected to fail with 404.
    try {
        createdVm = computeManager.virtualMachines().getById(createdVm.id());
    } catch (ManagementException ex) {
        createdVm = null;
        Assertions.assertEquals(404, ex.getResponse().getStatusCode());
    }
    Assertions.assertNull(createdVm);

    // Force deletion removes only the VM itself; attached resources such as
    // the primary NIC survive.
    NetworkInterface nic = networkManager.networkInterfaces().getById(primaryNicId);
    Assertions.assertNotNull(nic);
}
// Verifies delete-option semantics across update():
//  - vm1: default DeleteOptions.DELETE set at create time does NOT apply to a
//    data disk added later via update() -> exactly 1 disk survives deletion.
//  - vm2: no delete options at create time; DeleteOptions.DELETE set in
//    update() applies only to the disk added there -> 2 disks survive.
// NOTE(review): the source under review carried a duplicated @Test annotation
// on this method; @Test is not a repeatable annotation, so the duplicate was
// a compile error and has been removed.
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST2;
    Network network = this
        .networkManager
        .networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();
    // vm1: delete options applied to OS disk, data disks and NIC at create time.
    VirtualMachine vm1 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // A disk added during update() does not inherit the create-time defaults.
    vm1.update()
        .withNewDataDisk(10)
        .apply();
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // Only the update()-added data disk remains after the VM is deleted.
    Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.disks().deleteById(disk.id());
    // vm2: no disk delete options at create time.
    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // DeleteOptions.DELETE set in update() applies only to the disk added here.
    vm2.update()
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .apply();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // OS disk + create-time data disk survive; the update()-added disk is gone.
    Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}
@Test
public void canHibernateVirtualMachine() {
    // Create a hibernation-enabled Windows VM (requires a supporting size).
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
        .enableHibernation()
        .create();
    Assertions.assertTrue(vm.isHibernationEnabled());

    // Deallocate with hibernate=true and look for the hibernated status code
    // in the instance view.
    vm.deallocate(true);
    InstanceViewStatus hibernationStatus = null;
    for (InstanceViewStatus status : vm.instanceView().statuses()) {
        if ("HibernationState/Hibernated".equals(status.code())) {
            hibernationStatus = status;
            break;
        }
    }
    Assertions.assertNotNull(hibernationStatus);

    // Hibernation can only be toggled while the VM is deallocated.
    vm.start();
    vm.deallocate();
    vm.update()
        .disableHibernation()
        .apply();
    Assertions.assertFalse(vm.isHibernationEnabled());
}
// Builds vmCount VM creatables (each with its own network and public IP) that
// share one resource group and one storage account, and returns them together
// with the creatable keys needed to look the networks / IPs up after creation.
private CreatablesInfo prepareCreatableVirtualMachines(
    Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
    // Shared definitions used by every VM below. Definition order is kept as
    // before so generated resource names line up with recorded sessions.
    Creatable<ResourceGroup> rgDefinition =
        resourceManager.resourceGroups().define(rgName).withRegion(region);
    Creatable<StorageAccount> storageDefinition =
        storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(rgDefinition);
    CreatablesInfo result = new CreatablesInfo();
    result.networkCreatableKeys = new ArrayList<>();
    result.publicIpCreatableKeys = new ArrayList<>();
    result.virtualMachineCreatables = new ArrayList<>();
    for (int index = 0; index < vmCount; index++) {
        Creatable<Network> networkDefinition =
            networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, index))
                .withRegion(region)
                .withNewResourceGroup(rgDefinition)
                .withAddressSpace("10.0.0.0/28");
        result.networkCreatableKeys.add(networkDefinition.key());
        Creatable<PublicIpAddress> publicIpDefinition =
            networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, index))
                .withRegion(region)
                .withNewResourceGroup(rgDefinition);
        result.publicIpCreatableKeys.add(publicIpDefinition.key());
        result.virtualMachineCreatables.add(
            computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, index))
                .withRegion(region)
                .withNewResourceGroup(rgDefinition)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpDefinition)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageDefinition));
    }
    return result;
}
// Simple holder for the batch-creation helper: the VM creatables to submit in
// one call, plus the creatable keys used afterwards to resolve the networks
// and public IPs from the batch result.
class CreatablesInfo {
// VM definitions to be created in a single batch request.
private List<Creatable<VirtualMachine>> virtualMachineCreatables;
// Keys of the network creatables, in the order the VMs were defined.
List<String> networkCreatableKeys;
// Keys of the public IP creatables, in the order the VMs were defined.
List<String> publicIpCreatableKeys;
}
} |
This utility method wraps the `TestDelayProvider` (in playback), which always returns 1 ms for any Duration. | public void canCreateVirtualMachineWithDeleteOption() throws Exception {
Region region = Region.US_WEST2;
final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
Network network = this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/24")
.withSubnet("default", "10.0.0.0/24")
.create();
VirtualMachine vm1 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
String secondaryNicName = generateRandomResourceName("nic", 10);
Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm2 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm3 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm3.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
} | ResourceManagerUtils.sleep(Duration.ofSeconds(10)); | public void canCreateVirtualMachineWithDeleteOption() throws Exception {
Region region = Region.US_WEST2;
final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
Network network = this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/24")
.withSubnet("default", "10.0.0.0/24")
.create();
VirtualMachine vm1 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
String secondaryNicName = generateRandomResourceName("nic", 10);
Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm2 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
secondaryNetworkInterfaceCreatable =
this
.networkManager
.networkInterfaces()
.define(secondaryNicName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm3 = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withNewDataDisk(computeManager.disks()
.define("datadisk2")
.withRegion(region)
.withExistingResourceGroup(rgName)
.withData()
.withSizeInGB(10))
.withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();
computeManager.virtualMachines().deleteById(vm3.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
} | class VirtualMachineOperationsTests extends ComputeManagementTest {
// Primary resource group, named per test run in initializeClients().
private String rgName = "";
// Secondary resource group used by the proximity-placement-group tests.
// NOTE(review): cleanUpResources() deletes only rgName, so a group created
// under rgName2 appears to leak — confirm whether that is intentional.
private String rgName2 = "";
// Default region for most tests in this class.
private final Region region = Region.US_EAST;
// Regions for the two proximity placement groups under test.
private final Region regionProxPlacementGroup = Region.US_WEST;
private final Region regionProxPlacementGroup2 = Region.US_EAST;
// Fixed resource names reused across tests (unique per resource group).
private final String vmName = "javavm";
private final String proxGroupName = "testproxgroup1";
private final String proxGroupName2 = "testproxgroup2";
private final String availabilitySetName = "availset1";
private final String availabilitySetName2 = "availset2";
private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
// Generate per-run resource-group names before delegating to the base class.
// NOTE(review): the order of these generateRandomResourceName calls matters
// for record/playback test sessions — do not reorder.
rgName = generateRandomResourceName("javacsmrg", 15);
rgName2 = generateRandomResourceName("javacsmrg2", 15);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
    // Kick off (non-blocking) deletion of the primary resource group.
    // NOTE(review): rgName2 is not deleted here; the proximity-placement-group
    // test that creates a group under it may leak that group — confirm intent.
    if (rgName == null) {
        return;
    }
    resourceManager.resourceGroups().beginDeleteByName(rgName);
}
// Creates an NSG with a single inbound-allow rule, a network whose subnet is
// associated with that NSG, then a VM on that network, and verifies the NSG
// can be traversed back from the VM's primary IP configuration.
@Test
public void canCreateVirtualMachineWithNetworking() throws Exception {
NetworkSecurityGroup nsg =
this
.networkManager
.networkSecurityGroups()
.define("nsg")
.withRegion(region)
.withNewResourceGroup(rgName)
.defineRule("rule1")
.allowInbound()
.fromAnyAddress()
.fromPort(80)
.toAnyAddress()
.toPort(80)
.withProtocol(SecurityRuleProtocol.TCP)
.attach()
.create();
// Network definition whose subnet1 is protected by the NSG created above.
Creatable<Network> networkDefinition =
this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/28")
.defineSubnet("subnet1")
.withAddressPrefix("10.0.0.0/29")
.withExistingNetworkSecurityGroup(nsg)
.attach();
VirtualMachine vm =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork(networkDefinition)
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.create();
// Walk VM -> primary NIC -> primary IP config -> network/subnet -> NSG.
NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
Assertions.assertNotNull(primaryNic);
NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
Assertions.assertNotNull(primaryIpConfig);
// The IP config must reference a network and a subnet within it.
Assertions.assertNotNull(primaryIpConfig.networkId());
Network network = primaryIpConfig.getNetwork();
Assertions.assertNotNull(primaryIpConfig.subnetName());
Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
Assertions.assertNotNull(subnet);
// The subnet must carry the NSG created above, including its single rule.
nsg = subnet.getNetworkSecurityGroup();
Assertions.assertNotNull(nsg);
Assertions.assertEquals("nsg", nsg.name());
Assertions.assertEquals(1, nsg.securityRules().size());
// The NSG is also reachable directly from the IP configuration.
nsg = primaryIpConfig.getNetworkSecurityGroup();
Assertions.assertEquals("nsg", nsg.name());
}
// Creates a Windows VM with unmanaged disks and verifies basic read-back via
// the list and point-GET APIs: region, license type, power state and instance
// view; finally deletes the VM.
@Test
public void canCreateVirtualMachine() throws Exception {
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    // Locate the VM through the list API.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    // Re-fetch through the point GET API and verify the interesting properties.
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    Assertions.assertEquals(powerState, PowerState.RUNNING);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // Fixed: the original called assertNotNull on a boolean expression, which
    // can never fail (an autoboxed Boolean is never null). Assert the
    // condition itself instead.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotCreateVirtualMachineSyncPoll() throws Exception {
final String mySqlInstallScript = "https:
final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x(";
Assertions.assertThrows(IllegalStateException.class, () -> {
Accepted<VirtualMachine> acceptedVirtualMachine =
this.computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.defineNewExtension("CustomScriptForLinux")
.withPublisher("Microsoft.OSTCExtensions")
.withType("CustomScriptForLinux")
.withVersion("1.4")
.withMinorVersionAutoUpgrade()
.withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript))
.withPublicSetting("commandToExecute", installCommand)
.attach()
.beginCreate();
});
boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName);
Assertions.assertFalse(dependentResourceCreated);
rgName = null;
}
// Creates a VM through the sync-poll (beginCreate) flow, polls it to
// completion, deletes it via beginDeleteByResourceGroup with the same polling
// pattern, and finally verifies the VM is gone (404 NotFound).
@Test
public void canCreateVirtualMachineSyncPoll() throws Exception {
    // Fallback poll delay used when the service sends no Retry-After header.
    final long defaultDelayInMillis = 10 * 1000;
    Accepted<VirtualMachine> acceptedVirtualMachine = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .beginCreate();
    // The activation response exposes the not-yet-provisioned resource.
    VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
    Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());
    LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
    long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
        ? defaultDelayInMillis
        : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
    while (!pollStatus.isComplete()) {
        ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
        PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
        pollStatus = pollResponse.getStatus();
        delayInMills = pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : pollResponse.getRetryAfter().toMillis();
    }
    Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
    VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
    Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());
    // Delete the VM through the sync-poll flow as well.
    Accepted<Void> acceptedDelete = computeManager.virtualMachines()
        .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
    pollStatus = acceptedDelete.getActivationResponse().getStatus();
    // Fixed: the original narrowed toMillis() through an (int) cast before
    // assigning to this long — inconsistent with the create-poll loop above
    // and lossy for large Retry-After values; the casts were removed.
    delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null
        ? defaultDelayInMillis
        : acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
    while (!pollStatus.isComplete()) {
        ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
        PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
        pollStatus = pollResponse.getStatus();
        delayInMills = pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : pollResponse.getRetryAfter().toMillis();
    }
    // The VM must now be gone: GET should fail with 404 NotFound.
    boolean deleted = false;
    try {
        computeManager.virtualMachines().getById(virtualMachine.id());
    } catch (ManagementException e) {
        if (e.getResponse().getStatusCode() == 404
            && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) {
            deleted = true;
        }
    }
    Assertions.assertTrue(deleted);
}
// Covers Low-priority / Spot billing behavior: create with low priority and a
// max price, verify the max price can only change while deallocated, and that
// LOW <-> SPOT transitions succeed while moving back to REGULAR on a running
// VM is rejected.
@Test
public void canCreateUpdatePriorityAndPrice() throws Exception {
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withUnmanagedDisks()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.withOSDiskName("javatest")
.withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
.withMaxPrice(1000.0)
.withLicenseType("Windows_Server")
.create();
// Locate the VM through the list API, then through the point-GET API.
VirtualMachine foundVM = null;
PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
for (VirtualMachine vm1 : vms) {
if (vm1.name().equals(vmName)) {
foundVM = vm1;
break;
}
}
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(region, foundVM.region());
foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(region, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType());
Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
// Changing the max price on a RUNNING VM must be rejected by the service.
try {
foundVM.update().withMaxPrice(1500.0).apply();
// NOTE(review): this assertion is unreachable when the service behaves as
// expected — apply() above should throw before it runs.
Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
Assertions.fail();
} catch (ManagementException e) {
// expected: max price cannot be changed while the VM is running
}
// The max price can be changed while the VM is deallocated.
foundVM.deallocate();
foundVM.update().withMaxPrice(2000.0).apply();
foundVM.start();
Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
// LOW <-> SPOT priority transitions are allowed on a running VM ...
foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
// ... but moving back to REGULAR while running must fail.
try {
foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
Assertions.fail();
} catch (ManagementException e) {
// expected: priority cannot change to REGULAR while the VM is running
}
computeManager.virtualMachines().deleteById(foundVM.id());
}
// Verifies that a VM placed in one proximity placement group (shared with an
// availability set) cannot be re-homed to a second group while it is running.
@Test
public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
    // First PPG, created implicitly through the first availability set.
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // Second PPG in a different region / resource group.
    // NOTE(review): rgName2 is not deleted by cleanUpResources(); the group
    // created here appears to leak — confirm whether that is intentional.
    AvailabilitySet setCreated2 =
        computeManager
            .availabilitySets()
            .define(availabilitySetName2)
            .withRegion(regionProxPlacementGroup2)
            .withNewResourceGroup(rgName2)
            .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName2, setCreated2.name());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());
    // VM placed into the first PPG.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    Assertions.assertEquals(powerState, PowerState.RUNNING);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // Fixed: the original called assertNotNull on a boolean expression, which
    // can never fail (an autoboxed Boolean is never null). Assert the
    // condition itself instead.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    // The VM must be wired into the first PPG, and vice versa.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    try {
        // Attempting to re-home the running VM to the second PPG must fail.
        // NOTE(review): unlike sibling tests there is no Assertions.fail()
        // after apply(), so the test would still pass if the service allowed
        // the update; behavior left unchanged — confirm intent. (The unused
        // local the result was assigned to has been removed.)
        foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
    } catch (ManagementException clEx) {
        Assertions
            .assertTrue(
                clEx
                    .getMessage()
                    .contains(
                        "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                            + " Please stop/deallocate the VM and retry the operation."));
    }
    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
    // Create an availability set bound to a brand-new proximity placement group (PPG).
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // Create a VM that joins the same PPG the availability set belongs to.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    // The created VM must be discoverable both by listing and by a point GET.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    Assertions.assertEquals(powerState, PowerState.RUNNING);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // BUGFIX: the original called assertNotNull on a boolean expression, which is
    // vacuously true (an autoboxed Boolean is never null). Assert the condition itself.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    // The VM must report membership of the same PPG as the availability set.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    // Drop the PPG association from the VM.
    VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();
    // NOTE(review): these assertions expect the PPG to still be visible on the VM after
    // withoutProximityPlacementGroup() — confirm this is the intended service behavior
    // (the PPG resource itself, and its availability-set association, still exist).
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
    // Clean up the VM and the availability set created by this test.
    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    // Prepare 'count' VM creatables, each wired to its own network and public IP creatable.
    CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
    List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;
    // Create all VMs (and their dependent resources) in a single batch call.
    CreatedResources<VirtualMachine> createdVirtualMachines =
        computeManager.virtualMachines().create(virtualMachineCreatables);
    // BUGFIX-ish: assertEquals reports expected/actual on failure, unlike assertTrue(a == b).
    Assertions.assertEquals(count, createdVirtualMachines.size());
    // Every created VM must carry one of the expected generated names and a non-null id.
    Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
        Assertions.assertNotNull(virtualMachine.id());
    }
    // The related networks must be retrievable through their creatable keys.
    Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    for (String networkCreatableKey : networkCreatableKeys) {
        Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
        Assertions.assertNotNull(createdNetwork);
        Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
    }
    // Likewise for the related public IP addresses.
    Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    for (String publicIpCreatableKey : publicIpCreatableKeys) {
        PublicIpAddress createdPublicIpAddress =
            (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
        Assertions.assertNotNull(createdPublicIpAddress);
        Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
    }
}
@Test
public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    // Pre-compute the names each resource type is expected to carry; a single
    // pass fills all three sets.
    final Set<String> virtualMachineNames = new HashSet<>();
    final Set<String> networkNames = new HashSet<>();
    final Set<String> publicIPAddressNames = new HashSet<>();
    for (int index = 0; index < count; index++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, index));
        networkNames.add(String.format("%s-%d", networkNamePrefix, index));
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, index));
    }
    final CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    final AtomicInteger resourceCount = new AtomicInteger(0);
    // Stream the creation events and validate each emitted resource as it arrives.
    computeManager
        .virtualMachines()
        .createAsync(creatablesInfo.virtualMachineCreatables)
        .map(
            created -> {
                if (created instanceof Resource) {
                    Resource resource = (Resource) created;
                    System.out.println("Created: " + resource.id());
                    if (resource instanceof VirtualMachine) {
                        VirtualMachine virtualMachine = (VirtualMachine) resource;
                        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                        Assertions.assertNotNull(virtualMachine.id());
                    } else if (resource instanceof Network) {
                        Network network = (Network) resource;
                        Assertions.assertTrue(networkNames.contains(network.name()));
                        Assertions.assertNotNull(network.id());
                    } else if (resource instanceof PublicIpAddress) {
                        PublicIpAddress publicIpAddress = (PublicIpAddress) resource;
                        Assertions.assertTrue(publicIPAddressNames.contains(publicIpAddress.name()));
                        Assertions.assertNotNull(publicIpAddress.id());
                    }
                }
                resourceCount.incrementAndGet();
                return created;
            })
        .blockLast();
    // Every expected network and public IP must now exist in the resource group.
    for (String name : networkNames) {
        Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
    }
    for (String name : publicIPAddressNames) {
        Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
    }
    Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(count, resourceCount.get());
}
@Test
public void canSetStorageAccountForUnmanagedDisk() {
    // Verifies that unmanaged data-disk VHDs can be stored in an explicitly chosen
    // storage account / container / blob name, and can later be re-attached as
    // existing disks at create and update time.
    final String storageName = generateRandomResourceName("st", 14);
    // Premium storage account that will hold the VHD blobs.
    StorageAccount storageAccount =
        storageManager
            .storageAccounts()
            .define(storageName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(StorageAccountSkuType.PREMIUM_LRS)
            .create();
    // VM with two unmanaged data disks, each stored at an explicit blob location
    // (container "diskvhds") with explicit LUNs 2 and 3.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .defineUnmanagedDataDisk("disk1")
            .withNewVhd(100)
            .withLun(2)
            .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
            .attach()
            .defineUnmanagedDataDisk("disk2")
            .withNewVhd(100)
            .withLun(3)
            .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .attach()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .create();
    // The disks map is keyed by LUN; both disks must be present with their VHD URIs.
    Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
    Assertions.assertNotNull(unmanagedDataDisks);
    Assertions.assertEquals(2, unmanagedDataDisks.size());
    VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
    Assertions.assertNotNull(firstUnmanagedDataDisk);
    VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
    Assertions.assertNotNull(secondUnmanagedDataDisk);
    // Remember the blob URIs so the VHDs can be re-attached after the VM is deleted
    // (deleting the VM leaves the unmanaged VHD blobs behind in the storage account).
    String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
    String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
    Assertions.assertNotNull(createdVhdUri1);
    Assertions.assertNotNull(createdVhdUri2);
    computeManager.virtualMachines().deleteById(virtualMachine.id());
    // Recreate the VM (same name), attaching the first surviving VHD as an existing disk.
    virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
            .create();
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
    Assertions.assertNotNull(unmanagedDataDisks);
    Assertions.assertEquals(1, unmanagedDataDisks.size());
    // Grab the sole attached disk (its LUN is service-assigned here, so iterate).
    firstUnmanagedDataDisk = null;
    for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
        firstUnmanagedDataDisk = unmanagedDisk;
        break;
    }
    // It must point at the very same blob the first VM created.
    Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
    Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
    // The second surviving VHD can be attached through the update flow as well.
    virtualMachine
        .update()
        .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
        .apply();
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
    Assertions.assertNotNull(unmanagedDataDisks);
    Assertions.assertEquals(2, unmanagedDataDisks.size());
}
@Test
public void canUpdateTagsOnVM() {
    // Provision a minimal Linux VM to exercise tag updates against.
    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();
    // Add a single tag through the fluent update and verify it round-trips.
    vm.update().withTag("test", "testValue").apply();
    Assertions.assertEquals("testValue", vm.innerModel().tags().get("test"));
    // Apply a whole tag map and verify the entry round-trips as well.
    Map<String, String> replacementTags = new HashMap<>();
    replacementTags.put("testTag", "testValue");
    vm.update().withTags(replacementTags).apply();
    Assertions.assertEquals(replacementTags.get("testTag"), vm.innerModel().tags().get("testTag"));
}
@Test
public void canRunScriptOnVM() {
    // Provision a minimal Linux VM to run the shell script on.
    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .create();
    // Commands executed through the run-command facility: refresh apt, install git.
    List<String> commands = new ArrayList<>();
    commands.add("sudo apt-get update");
    commands.add("sudo apt-get install -y git");
    RunCommandResult result = vm.runShellScript(commands, new ArrayList<RunCommandInputParameter>());
    // The run must complete and report at least one status/output entry.
    Assertions.assertNotNull(result);
    Assertions.assertNotNull(result.value());
    Assertions.assertFalse(result.value().isEmpty());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canPerformSimulateEvictionOnSpotVirtualMachine() {
    // Verifies that simulating eviction on a Spot VM eventually deallocates it and
    // leaves its OS disk in the RESERVED state. Skipped in playback because the
    // polling below depends on real elapsed time.
    VirtualMachine virtualMachine = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("firstuser")
        .withSsh(sshPublicKey())
        .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .create();
    // While the VM is running, the OS disk is attached and reports type and size.
    Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
    Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
    Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
    Assertions.assertNotNull(disk);
    Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
    // Trigger the eviction; the actual deallocation happens asynchronously on the
    // service side, so poll for up to 30 minutes in 5-minute steps.
    virtualMachine.simulateEviction();
    boolean deallocated = false;
    int pollIntervalInMinutes = 5;
    for (int i = 0; i < 30; i += pollIntervalInMinutes) {
        ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
            deallocated = true;
            break;
        }
    }
    Assertions.assertTrue(deallocated);
    // After eviction the compute resources are released: the VM model no longer
    // reports an OS disk storage type or size, and the disk itself is RESERVED.
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    Assertions.assertNotNull(virtualMachine);
    Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
    Assertions.assertEquals(0, virtualMachine.osDiskSize());
    disk = computeManager.disks().getById(virtualMachine.osDiskId());
    Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}
@Test
public void canForceDeleteVirtualMachine() {
    // Provision a Windows VM in the canary region used for force-delete support.
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .create();
    VirtualMachine vm = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(vm);
    Assertions.assertEquals(Region.fromName("eastus2euap"), vm.region());
    // Remember the primary NIC id so its survival can be checked after deletion.
    String nicId = vm.primaryNetworkInterfaceId();
    // Delete with forceDeletion = true.
    computeManager.virtualMachines().deleteById(vm.id(), true);
    // A subsequent GET must fail with 404, proving the VM is gone.
    try {
        vm = computeManager.virtualMachines().getById(vm.id());
    } catch (ManagementException ex) {
        vm = null;
        Assertions.assertEquals(404, ex.getResponse().getStatusCode());
    }
    Assertions.assertNull(vm);
    // The network interface is not removed by the force delete.
    NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
    Assertions.assertNotNull(nic);
}
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    // BUGFIX: removed a duplicated @Test annotation that preceded this method —
    // JUnit 5's @Test is not @Repeatable, so the duplicate did not compile.
    // NOTE: this local intentionally shadows the class-level 'region' field.
    Region region = Region.US_WEST2;
    Network network = this
        .networkManager
        .networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();
    // VM1: OS disk, create-time data disk and NIC all carry DeleteOptions.DELETE.
    VirtualMachine vm1 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // Add a second data disk via update WITHOUT specifying a delete option; the
    // create-time default is not applied to it.
    vm1.update()
        .withNewDataDisk(10)
        .apply();
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // Only the update-added data disk should survive the VM deletion.
    Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.disks().deleteById(disk.id());
    // VM2: only the NIC is set to DELETE at create time; disks keep the default option.
    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // Add a data disk via update with the default delete option set to DELETE.
    vm2.update()
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .apply();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // The OS disk and the create-time data disk survive; the update-added disk
    // (delete option DELETE) is removed together with the VM.
    Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}
@Test
public void canHibernateVirtualMachine() {
    // Provision a Windows VM with the hibernation capability switched on.
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
        .enableHibernation()
        .create();
    Assertions.assertTrue(vm.isHibernationEnabled());
    // Deallocate with hibernate = true, then look for the hibernated status code
    // in the instance view.
    vm.deallocate(true);
    InstanceViewStatus hibernationStatus = null;
    for (InstanceViewStatus status : vm.instanceView().statuses()) {
        if ("HibernationState/Hibernated".equals(status.code())) {
            hibernationStatus = status;
            break;
        }
    }
    Assertions.assertNotNull(hibernationStatus);
    // Hibernation can only be reconfigured on a deallocated VM, so start and
    // deallocate again before disabling it.
    vm.start();
    vm.deallocate();
    vm.update()
        .disableHibernation()
        .apply();
    Assertions.assertFalse(vm.isHibernationEnabled());
}
// Builds 'vmCount' VM creatables plus the per-VM network and public-IP creatables,
// returning them (and the related creatable keys) bundled in a CreatablesInfo.
private CreatablesInfo prepareCreatableVirtualMachines(
    Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
    // Shared creatables: a single resource group and storage account reused by every VM.
    Creatable<ResourceGroup> rgCreatable =
        resourceManager.resourceGroups().define(rgName).withRegion(region);
    Creatable<StorageAccount> storageCreatable =
        storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(rgCreatable);
    List<String> networkKeys = new ArrayList<>();
    List<String> publicIpKeys = new ArrayList<>();
    List<Creatable<VirtualMachine>> vmCreatables = new ArrayList<>();
    for (int i = 0; i < vmCount; i++) {
        // Each VM gets its own network creatable...
        Creatable<Network> networkCreatable =
            networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(rgCreatable)
                .withAddressSpace("10.0.0.0/28");
        networkKeys.add(networkCreatable.key());
        // ...and its own public IP creatable; the keys let callers fetch the
        // created related resources after the batch create.
        Creatable<PublicIpAddress> publicIpCreatable =
            networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(rgCreatable);
        publicIpKeys.add(publicIpCreatable.key());
        vmCreatables.add(
            computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(rgCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageCreatable));
    }
    CreatablesInfo result = new CreatablesInfo();
    result.virtualMachineCreatables = vmCreatables;
    result.networkCreatableKeys = networkKeys;
    result.publicIpCreatableKeys = publicIpKeys;
    return result;
}
// Simple aggregate returned by prepareCreatableVirtualMachines: the VM creatables
// plus the creatable keys of the related network and public-IP definitions, which
// callers use with CreatedResources.createdRelatedResource(...).
class CreatablesInfo {
    private List<Creatable<VirtualMachine>> virtualMachineCreatables;
    List<String> networkCreatableKeys;
    List<String> publicIpCreatableKeys;
}
} | class VirtualMachineOperationsTests extends ComputeManagementTest {
private String rgName = "";
private String rgName2 = "";
private final Region region = Region.US_EAST;
private final Region regionProxPlacementGroup = Region.US_WEST;
private final Region regionProxPlacementGroup2 = Region.US_EAST;
private final String vmName = "javavm";
private final String proxGroupName = "testproxgroup1";
private final String proxGroupName2 = "testproxgroup2";
private final String availabilitySetName = "availset1";
private final String availabilitySetName2 = "availset2";
private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
rgName = generateRandomResourceName("javacsmrg", 15);
rgName2 = generateRandomResourceName("javacsmrg2", 15);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
if (rgName != null) {
resourceManager.resourceGroups().beginDeleteByName(rgName);
}
}
@Test
public void canCreateVirtualMachineWithNetworking() throws Exception {
NetworkSecurityGroup nsg =
this
.networkManager
.networkSecurityGroups()
.define("nsg")
.withRegion(region)
.withNewResourceGroup(rgName)
.defineRule("rule1")
.allowInbound()
.fromAnyAddress()
.fromPort(80)
.toAnyAddress()
.toPort(80)
.withProtocol(SecurityRuleProtocol.TCP)
.attach()
.create();
Creatable<Network> networkDefinition =
this
.networkManager
.networks()
.define("network1")
.withRegion(region)
.withNewResourceGroup(rgName)
.withAddressSpace("10.0.0.0/28")
.defineSubnet("subnet1")
.withAddressPrefix("10.0.0.0/29")
.withExistingNetworkSecurityGroup(nsg)
.attach();
VirtualMachine vm =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork(networkDefinition)
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.create();
NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
Assertions.assertNotNull(primaryNic);
NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
Assertions.assertNotNull(primaryIpConfig);
Assertions.assertNotNull(primaryIpConfig.networkId());
Network network = primaryIpConfig.getNetwork();
Assertions.assertNotNull(primaryIpConfig.subnetName());
Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
Assertions.assertNotNull(subnet);
nsg = subnet.getNetworkSecurityGroup();
Assertions.assertNotNull(nsg);
Assertions.assertEquals("nsg", nsg.name());
Assertions.assertEquals(1, nsg.securityRules().size());
nsg = primaryIpConfig.getNetworkSecurityGroup();
Assertions.assertEquals("nsg", nsg.name());
}
@Test
public void canCreateVirtualMachine() throws Exception {
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withUnmanagedDisks()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.withOSDiskName("javatest")
.withLicenseType("Windows_Server")
.create();
VirtualMachine foundVM = null;
PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
for (VirtualMachine vm1 : vms) {
if (vm1.name().equals(vmName)) {
foundVM = vm1;
break;
}
}
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(region, foundVM.region());
foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(region, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType());
PowerState powerState = foundVM.powerState();
Assertions.assertEquals(powerState, PowerState.RUNNING);
VirtualMachineInstanceView instanceView = foundVM.instanceView();
Assertions.assertNotNull(instanceView);
Assertions.assertNotNull(instanceView.statuses().size() > 0);
computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotCreateVirtualMachineSyncPoll() throws Exception {
final String mySqlInstallScript = "https:
final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x(";
Assertions.assertThrows(IllegalStateException.class, () -> {
Accepted<VirtualMachine> acceptedVirtualMachine =
this.computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.defineNewExtension("CustomScriptForLinux")
.withPublisher("Microsoft.OSTCExtensions")
.withType("CustomScriptForLinux")
.withVersion("1.4")
.withMinorVersionAutoUpgrade()
.withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript))
.withPublicSetting("commandToExecute", installCommand)
.attach()
.beginCreate();
});
boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName);
Assertions.assertFalse(dependentResourceCreated);
rgName = null;
}
@Test
public void canCreateVirtualMachineSyncPoll() throws Exception {
final long defaultDelayInMillis = 10 * 1000;
Accepted<VirtualMachine> acceptedVirtualMachine = computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withUnmanagedDisks()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.withOSDiskName("javatest")
.withLicenseType("Windows_Server")
.beginCreate();
VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());
LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
? defaultDelayInMillis
: acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
while (!pollStatus.isComplete()) {
ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
pollStatus = pollResponse.getStatus();
delayInMills = pollResponse.getRetryAfter() == null
? defaultDelayInMillis
: pollResponse.getRetryAfter().toMillis();
}
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());
Accepted<Void> acceptedDelete = computeManager.virtualMachines()
.beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
pollStatus = acceptedDelete.getActivationResponse().getStatus();
delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null
? defaultDelayInMillis
: (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
while (!pollStatus.isComplete()) {
ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
pollStatus = pollResponse.getStatus();
delayInMills = pollResponse.getRetryAfter() == null
? defaultDelayInMillis
: (int) pollResponse.getRetryAfter().toMillis();
}
boolean deleted = false;
try {
computeManager.virtualMachines().getById(virtualMachine.id());
} catch (ManagementException e) {
if (e.getResponse().getStatusCode() == 404
&& ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) {
deleted = true;
}
}
Assertions.assertTrue(deleted);
}
@Test
public void canCreateUpdatePriorityAndPrice() throws Exception {
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withUnmanagedDisks()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.withOSDiskName("javatest")
.withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
.withMaxPrice(1000.0)
.withLicenseType("Windows_Server")
.create();
VirtualMachine foundVM = null;
PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
for (VirtualMachine vm1 : vms) {
if (vm1.name().equals(vmName)) {
foundVM = vm1;
break;
}
}
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(region, foundVM.region());
foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(region, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType());
Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
try {
foundVM.update().withMaxPrice(1500.0).apply();
Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
Assertions.fail();
} catch (ManagementException e) {
}
foundVM.deallocate();
foundVM.update().withMaxPrice(2000.0).apply();
foundVM.start();
Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
try {
foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
Assertions.fail();
} catch (ManagementException e) {
}
computeManager.virtualMachines().deleteById(foundVM.id());
}
@Test
public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
    // Create two availability sets, each bound to its own newly-created proximity placement group (PPG).
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    AvailabilitySet setCreated2 =
        computeManager
            .availabilitySets()
            .define(availabilitySetName2)
            .withRegion(regionProxPlacementGroup2)
            .withNewResourceGroup(rgName2)
            .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName2, setCreated2.name());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());
    // Create a Windows VM pinned to the first PPG.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    // The VM must be discoverable both via listing and via direct get.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    // FIX: expected value goes first in JUnit 5 assertEquals.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // FIX: was assertNotNull(size() > 0) — a boxed Boolean is never null, so it always passed.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    // Moving a running VM to a different PPG must be rejected by the service.
    try {
        foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
        // FIX: without this fail() the test silently passed when the update unexpectedly succeeded.
        Assertions.fail("Updating the proximity placement group of a running VM should have thrown");
    } catch (ManagementException clEx) {
        Assertions
            .assertTrue(
                clEx
                    .getMessage()
                    .contains(
                        "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                            + " Please stop/deallocate the VM and retry the operation."));
    }
    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
    // Availability set created inside a brand-new proximity placement group (PPG).
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // VM created in the same PPG as the availability set.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    // FIX: expected value goes first in JUnit 5 assertEquals.
    Assertions.assertEquals(PowerState.RUNNING, powerState);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    // FIX: was assertNotNull(size() > 0) — a boxed Boolean is never null, so it always passed.
    Assertions.assertTrue(instanceView.statuses().size() > 0);
    // Both the availability set and the VM must be registered on the PPG.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    // NOTE(review): after withoutProximityPlacementGroup() the resource still reports the PPG and
    // the assertions below expect it to — confirm this is the intended service behavior.
    VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
    computeManager.virtualMachines().deleteById(foundVM.id());
    computeManager.availabilitySets().deleteById(setCreated.id());
}
@Test
public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
    // Provision `count` VMs in one batched create() call and verify that each VM and each
    // related network / public-IP creatable materialized with the expected "<prefix>-<i>" name.
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
    List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;
    CreatedResources<VirtualMachine> createdVirtualMachines =
        computeManager.virtualMachines().create(virtualMachineCreatables);
    // FIX: assertEquals instead of assertTrue(a == b) — reports expected/actual on failure.
    Assertions.assertEquals(count, createdVirtualMachines.size());
    Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
        Assertions.assertNotNull(virtualMachine.id());
    }
    Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    // Related resources are looked up by the creatable keys recorded during preparation.
    for (String networkCreatableKey : networkCreatableKeys) {
        Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
        Assertions.assertNotNull(createdNetwork);
        Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
    }
    Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    for (String publicIpCreatableKey : publicIpCreatableKeys) {
        PublicIpAddress createdPublicIpAddress =
            (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
        Assertions.assertNotNull(createdPublicIpAddress);
        Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
    }
}
@Test
// Creates 5 VMs (plus per-VM network and public IP) via the reactive createAsync API and
// validates each resource as it is emitted, then re-checks the final state of the resource group.
public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
String vmNamePrefix = "vmz";
String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
String networkNamePrefix = generateRandomResourceName("vnet-", 15);
int count = 5;
// Expected names follow the "<prefix>-<index>" convention used by prepareCreatableVirtualMachines.
final Set<String> virtualMachineNames = new HashSet<>();
for (int i = 0; i < count; i++) {
virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
}
final Set<String> networkNames = new HashSet<>();
for (int i = 0; i < count; i++) {
networkNames.add(String.format("%s-%d", networkNamePrefix, i));
}
final Set<String> publicIPAddressNames = new HashSet<>();
for (int i = 0; i < count; i++) {
publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
}
final CreatablesInfo creatablesInfo =
prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
// Counts every item the stream emits; compared against `count` at the end.
final AtomicInteger resourceCount = new AtomicInteger(0);
List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
computeManager
.virtualMachines()
.createAsync(virtualMachineCreatables)
.map(
createdResource -> {
if (createdResource instanceof Resource) {
Resource resource = (Resource) createdResource;
System.out.println("Created: " + resource.id());
if (resource instanceof VirtualMachine) {
VirtualMachine virtualMachine = (VirtualMachine) resource;
Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
Assertions.assertNotNull(virtualMachine.id());
} else if (resource instanceof Network) {
Network network = (Network) resource;
Assertions.assertTrue(networkNames.contains(network.name()));
Assertions.assertNotNull(network.id());
} else if (resource instanceof PublicIpAddress) {
PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
Assertions.assertNotNull(publicIPAddress.id());
}
}
resourceCount.incrementAndGet();
return createdResource;
})
// Block until the entire parallel creation pipeline completes.
.blockLast();
// Related resources must exist even though only VMs were asked for explicitly.
networkNames.forEach(name -> {
Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
});
publicIPAddressNames.forEach(name -> {
Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
});
// One storage account shared by all VMs; one NIC per VM.
Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
// NOTE(review): resourceCount is incremented for every emitted item yet compared to the VM
// count — presumably createAsync emits only the top-level VMs here (making the Network /
// PublicIpAddress branches above dead); confirm against the SDK version in use.
Assertions.assertEquals(count, resourceCount.get());
}
@Test
// Verifies that unmanaged data disks can be stored in an explicitly chosen storage account
// ("storeAt"), re-attached later by container/vhd name, and added through a VM update.
public void canSetStorageAccountForUnmanagedDisk() {
final String storageName = generateRandomResourceName("st", 14);
// Premium storage account that will hold the data-disk VHDs.
StorageAccount storageAccount =
storageManager
.storageAccounts()
.define(storageName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withSku(StorageAccountSkuType.PREMIUM_LRS)
.create();
// VM with two new unmanaged data disks placed in the "diskvhds" container of that account.
VirtualMachine virtualMachine =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.defineUnmanagedDataDisk("disk1")
.withNewVhd(100)
.withLun(2)
.storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
.attach()
.defineUnmanagedDataDisk("disk2")
.withNewVhd(100)
.withLun(3)
.storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
.attach()
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.withOSDiskCaching(CachingTypes.READ_WRITE)
.create();
// unmanagedDataDisks() is keyed by LUN (2 and 3 as configured above).
Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
Assertions.assertNotNull(firstUnmanagedDataDisk);
VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
Assertions.assertNotNull(secondUnmanagedDataDisk);
// Keep the VHD URIs so we can verify re-attachment below after the VM is deleted
// (deleting the VM leaves the VHD blobs behind).
String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
Assertions.assertNotNull(createdVhdUri1);
Assertions.assertNotNull(createdVhdUri2);
computeManager.virtualMachines().deleteById(virtualMachine.id());
// New VM that re-attaches the first existing VHD by account/container/blob name.
virtualMachine =
computeManager
.virtualMachines()
.define(vmName)
.withRegion(region)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
.create();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(1, unmanagedDataDisks.size());
firstUnmanagedDataDisk = null;
for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
firstUnmanagedDataDisk = unmanagedDisk;
break;
}
// The re-attached disk must point at the same VHD blob as before.
Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
// The second existing VHD can also be attached through an update.
virtualMachine
.update()
.withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
.apply();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
}
@Test
public void canUpdateTagsOnVM() {
    // Provision a minimal Linux VM to exercise tag updates against.
    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();
    // Adding a single tag must be reflected on the inner model.
    vm.update().withTag("test", "testValue").apply();
    Assertions.assertEquals("testValue", vm.innerModel().tags().get("test"));
    // Replacing the whole tag map must be reflected as well.
    Map<String, String> replacementTags = new HashMap<>();
    replacementTags.put("testTag", "testValue");
    vm.update().withTags(replacementTags).apply();
    Assertions.assertEquals(replacementTags.get("testTag"), vm.innerModel().tags().get("testTag"));
}
@Test
public void canRunScriptOnVM() {
    // A plain Ubuntu VM is enough to exercise the run-command API.
    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .create();
    // Install git through two apt commands; the script takes no input parameters.
    List<String> commands = new ArrayList<>();
    commands.add("sudo apt-get update");
    commands.add("sudo apt-get install -y git");
    RunCommandResult result = vm.runShellScript(commands, new ArrayList<RunCommandInputParameter>());
    // A successful run returns at least one status/output entry.
    Assertions.assertNotNull(result);
    Assertions.assertNotNull(result.value());
    Assertions.assertTrue(result.value().size() > 0);
}
@Test
@DoNotRecord(skipInPlayback = true)
// Verifies spot-VM eviction: after simulateEviction() the VM eventually reports DEALLOCATED,
// its OS-disk metadata is cleared on the VM resource, and the OS disk itself moves to RESERVED.
// Skipped in playback because the multi-minute polling below cannot be recorded sensibly.
public void canPerformSimulateEvictionOnSpotVirtualMachine() {
VirtualMachine virtualMachine = computeManager.virtualMachines()
.define(vmName)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("firstuser")
.withSsh(sshPublicKey())
.withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
.withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
.create();
// Before eviction the VM exposes OS disk details and the disk is attached.
Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertNotNull(disk);
Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
// Eviction is asynchronous; poll for up to 30 minutes in 5-minute steps.
virtualMachine.simulateEviction();
boolean deallocated = false;
int pollIntervalInMinutes = 5;
for (int i = 0; i < 30; i += pollIntervalInMinutes) {
ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
deallocated = true;
break;
}
}
Assertions.assertTrue(deallocated);
// After eviction the VM resource no longer carries OS-disk details...
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
Assertions.assertNotNull(virtualMachine);
Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertEquals(0, virtualMachine.osDiskSize());
// ...and the OS disk is kept in the RESERVED state rather than deleted.
disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}
@Test
// Verifies force-delete: the VM itself is removed (subsequent get returns 404),
// while dependent resources such as the primary NIC are left behind.
public void canForceDeleteVirtualMachine() {
computeManager.virtualMachines()
.define(vmName)
.withRegion("eastus2euap")
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.create();
VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
Assertions.assertNotNull(virtualMachine);
Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
// Remember the NIC id so we can check it survives the forced deletion.
String nicId = virtualMachine.primaryNetworkInterfaceId();
// true -> force delete.
computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
// The VM must be gone: getById is expected to fail with 404.
try {
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
} catch (ManagementException ex) {
virtualMachine = null;
Assertions.assertEquals(404, ex.getResponse().getStatusCode());
}
Assertions.assertNull(virtualMachine);
// The NIC is not deleted by a force delete of the VM.
NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
Assertions.assertNotNull(nic);
}
// FIX: the @Test annotation was duplicated, which is a compile error — @Test is not a
// repeatable annotation.
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    // Local region deliberately overrides the test-class default for this scenario.
    Region region = Region.US_WEST2;
    Network network = this
        .networkManager
        .networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();
    // vm1: OS disk, create-time data disk and NIC all carry DeleteOptions.DELETE.
    VirtualMachine vm1 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // A data disk added during update does NOT inherit the create-time default delete option.
    vm1.update()
        .withNewDataDisk(10)
        .apply();
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // Only the update-added disk survives the VM deletion.
    Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.disks().deleteById(disk.id());
    // vm2: create-time disks keep the default (DETACH) option; only the NIC is set to DELETE.
    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // The update-added disk carries DELETE, so it is removed with the VM.
    vm2.update()
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .apply();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // OS disk + create-time data disk of vm2 remain.
    Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}
@Test
// Verifies hibernation support: a VM created with enableHibernation() can be deallocated with
// hibernate=true, reports the "HibernationState/Hibernated" instance-view status, and can have
// hibernation disabled again once deallocated normally.
public void canHibernateVirtualMachine() {
VirtualMachine vm = computeManager.virtualMachines()
.define(vmName)
.withRegion("eastus2euap")
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/28")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
.enableHibernation()
.create();
Assertions.assertTrue(vm.isHibernationEnabled());
// deallocate(true) requests hibernation instead of a plain deallocate.
vm.deallocate(true);
// The hibernated state is surfaced as an instance-view status code.
InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
.filter(status -> "HibernationState/Hibernated".equals(status.code()))
.findFirst().orElse(null);
Assertions.assertNotNull(hibernationStatus);
vm.start();
// Hibernation can only be toggled while the VM is deallocated.
vm.deallocate();
vm.update()
.disableHibernation()
.apply();
Assertions.assertFalse(vm.isHibernationEnabled());
}
// Builds `vmCount` VM creatables named "<vmNamePrefix>-<i>", each with its own new network
// ("<networkNamePrefix>-<i>") and public IP ("<publicIpNamePrefix>-<i>"), all sharing one new
// resource group and one new storage account. The keys of the network/IP creatables are
// recorded so callers can later look up the created related resources by key.
private CreatablesInfo prepareCreatableVirtualMachines(
Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
Creatable<ResourceGroup> resourceGroupCreatable =
resourceManager.resourceGroups().define(rgName).withRegion(region);
// Single storage account shared by every VM's unmanaged disks.
Creatable<StorageAccount> storageAccountCreatable =
storageManager
.storageAccounts()
.define(generateRandomResourceName("stg", 20))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable);
List<String> networkCreatableKeys = new ArrayList<>();
List<String> publicIpCreatableKeys = new ArrayList<>();
List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
for (int i = 0; i < vmCount; i++) {
Creatable<Network> networkCreatable =
networkManager
.networks()
.define(String.format("%s-%d", networkNamePrefix, i))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable)
.withAddressSpace("10.0.0.0/28");
// Keys are how related resources are retrieved from CreatedResources later.
networkCreatableKeys.add(networkCreatable.key());
Creatable<PublicIpAddress> publicIPAddressCreatable =
networkManager
.publicIpAddresses()
.define(String.format("%s-%d", publicIpNamePrefix, i))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable);
publicIpCreatableKeys.add(publicIPAddressCreatable.key());
Creatable<VirtualMachine> virtualMachineCreatable =
computeManager
.virtualMachines()
.define(String.format("%s-%d", vmNamePrefix, i))
.withRegion(region)
.withNewResourceGroup(resourceGroupCreatable)
.withNewPrimaryNetwork(networkCreatable)
.withPrimaryPrivateIPAddressDynamic()
.withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("tirekicker")
.withSsh(sshPublicKey())
.withUnmanagedDisks()
.withNewStorageAccount(storageAccountCreatable);
virtualMachineCreatables.add(virtualMachineCreatable);
}
CreatablesInfo creatablesInfo = new CreatablesInfo();
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
creatablesInfo.networkCreatableKeys = networkCreatableKeys;
creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
return creatablesInfo;
}
class CreatablesInfo {
private List<Creatable<VirtualMachine>> virtualMachineCreatables;
List<String> networkCreatableKeys;
List<String> publicIpCreatableKeys;
}
} |
Why was this changed? That doesn't seem right. | public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[0]);
} | return keySet.toArray(new String[0]); | public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[keySet.size()]);
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
// Key prefix shared by this source's settings; stripped from keys in initProperties().
private final String context;
// Backing store for getProperty()/getPropertyNames(); insertion order preserved.
private Map<String, Object> properties = new LinkedHashMap<>();
// Label filter applied when listing settings from the store.
private final String label;
private AppConfigurationProperties appConfigurationProperties;
// Shared JSON mapper; do not reconfigure (initFeatures uses its own, differently-configured mapper).
private static ObjectMapper mapper = new ObjectMapper();
// Key Vault clients cached per vault host, populated lazily by getKeyVaultClient().
private HashMap<String, KeyVaultClient> keyVaultClients;
private ClientStore clients;
private KeyVaultCredentialProvider keyVaultCredentialProvider;
private SecretClientBuilderSetup keyVaultClientProvider;
private AppConfigurationProviderProperties appProperties;
private ConfigStore configStore;
// The property-source name combines context, store endpoint and label so that multiple
// sources (per store/label) remain distinguishable in the Spring environment.
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
// Key Vault clients are created lazily per vault host; start with an empty cache.
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
/**
 * Returns the cached configuration value for {@code name}, or {@code null} if absent.
 * FIX: the {@code @Override} annotation was duplicated, which is a compile error —
 * annotations may appear at most once unless declared repeatable.
 */
@Override
public Object getProperty(String name) {
    return properties.get(name);
}
/**
 * <p>
 * Gets settings from Azure/Cache to set as configurations. Updates the cache.
 * </p>
 *
 * <p>
 * <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call
 * {@code initFeatures} to update Feature Management, but make sure its done in the
 * last {@code AppConfigurationPropertySource}
 * </p>
 *
 * @param featureSet The set of Feature Management Flags from various config stores.
 * @throws IOException Thrown when processing key/value failed when reading feature
 * flags
 * @return Updated Feature Set from Property Source
 */
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
// Two listings against the same selector: regular settings under the context prefix,
// then feature flags under the ".appconfig" prefix.
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
settingSelector.setKeyFilter(".appconfig*");
List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName);
if (settings == null || features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
// Strip the context prefix and translate '/' separators into Spring's '.' notation.
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) {
// Key Vault references are resolved to their secret value; unresolvable entries are skipped.
String entry = getKeyVaultEntry(setting.getValue());
if (entry != null) {
properties.put(key, entry);
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
 * Given a Setting's Key Vault Reference stored in the Settings value, it will get its
 * entry in Key Vault.
 *
 * @param value {"uri":
 * "&lt;your-vault-url&gt;/secret/&lt;secret&gt;/&lt;version&gt;"}
 * @return Key Vault Secret Value
 */
private String getKeyVaultEntry(String value) {
String secretValue = null;
try {
URI uri = null;
// The setting value is a small JSON document whose "uri" field points at the secret.
try {
JsonNode kvReference = mapper.readTree(value);
uri = new URI(kvReference.at("/uri").asText());
} catch (URISyntaxException e) {
// Rethrown as unchecked; the outer catch below will not see it again.
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
// Client is cached per vault host; getSecret is bounded by the configured max retry time.
KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost())
.getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
// Rethrown unchecked — callers treat an unresolvable reference as fatal for the refresh.
LOGGER.error("Error Retrieving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
// Returns the cached Key Vault client for the given vault host, creating and caching
// a new one on first use. Not thread-safe (backed by a plain HashMap), same as before.
KeyVaultClient getKeyVaultClient(URI uri, String uriHost) {
    KeyVaultClient client = keyVaultClients.get(uriHost);
    if (client == null) {
        client = new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider);
        keyVaultClients.put(uriHost, client);
    }
    return client;
}
/**
 * Initializes Feature Management configurations. Only one
 * {@code AppConfigurationPropertySource} can call this, and it needs to be done after
 * the rest have run initProperties.
 * @param featureSet Feature Flag info to be set to this property source.
 */
void initFeatures(FeatureSet featureSet) {
// Local mapper so the kebab-case naming strategy does not leak into the shared static mapper.
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
/**
 * Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}.
 *
 * @param featureSet The parsed KeyValueItems will be added to this
 * @param settings New items read in from Azure
 * @param date Cache timestamp
 * @return the updated {@code FeatureSet}
 * @throws IOException if a setting cannot be parsed as a feature flag
 */
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
// NOTE(review): the `date` parameter is currently unused in this method body.
for (ConfigurationSetting setting : settings) {
Object feature = createFeature(setting);
if (feature != null) {
// Feature names are the key minus the well-known feature-flag prefix.
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into
* properties.
* @return Feature created from KeyValueItem
* @throws IOException
*/
private Object createFeature(ConfigurationSetting item) throws IOException {
Feature feature = null;
if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) {
try {
String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class);
feature = new Feature(key, featureItem);
if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) {
return true;
} else if (!featureItem.getEnabled()) {
return false;
}
return feature;
} catch (IOException e) {
throw new IOException("Unable to parse Feature Management values from Azure.", e);
}
} else {
String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(),
item.getContentType());
throw new IOException(message);
}
}
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private final String context;
private Map<String, Object> properties = new LinkedHashMap<>();
private final String label;
private AppConfigurationProperties appConfigurationProperties;
private static ObjectMapper mapper = new ObjectMapper();
private HashMap<String, KeyVaultClient> keyVaultClients;
private ClientStore clients;
private KeyVaultCredentialProvider keyVaultCredentialProvider;
private SecretClientBuilderSetup keyVaultClientProvider;
private AppConfigurationProviderProperties appProperties;
private ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
@Override
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call
* {@code initFeatures} to update Feature Management, but make sure its done in the
* last {@code AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @throws IOException Thrown when processing key/value failed when reading feature
* flags
* @return Updated Feature Set from Property Source
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
settingSelector.setKeyFilter(".appconfig*");
List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName);
if (settings == null || features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) {
String entry = getKeyVaultEntry(setting.getValue());
if (entry != null) {
properties.put(key, entry);
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its
* entry in Key Vault.
*
* @param value {"uri":
* "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(String value) {
String secretValue = null;
try {
URI uri = null;
try {
JsonNode kvReference = mapper.readTree(value);
uri = new URI(kvReference.at("/uri").asText());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost())
.getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retrieving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
KeyVaultClient getKeyVaultClient(URI uri, String uriHost) {
return keyVaultClients.computeIfAbsent(uriHost, ignored ->
new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider));
}
/**
* Initializes Feature Management configurations. Only one
* {@code AppConfigurationPropertySource} can call this, and it needs to be done after
* the rest have run initProperties.
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
/**
* Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}.
*
* @param featureSet The parsed KeyValueItems will be added to this
* @param settings New items read in from Azure
* @param date Cache timestamp
* @throws IOException
*/
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
Object feature = createFeature(setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into
* properties.
* @return Feature created from KeyValueItem
* @throws IOException
*/
private Object createFeature(ConfigurationSetting item) throws IOException {
Feature feature = null;
if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) {
try {
String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class);
feature = new Feature(key, featureItem);
if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) {
return true;
} else if (!featureItem.getEnabled()) {
return false;
}
return feature;
} catch (IOException e) {
throw new IOException("Unable to parse Feature Management values from Azure.", e);
}
} else {
String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(),
item.getContentType());
throw new IOException(message);
}
}
} |
Hmm, so both are right. I read through the `toArray(T[])` documentation further and it explains that if the passed array is large enough to fit the elements of the `Collection`, or `Set` in this case, that array will be populated and returned. If the passed array isn't large enough a new one will be allocated internally that is large enough to hold all elements of the `Collection`. So, the empty array is slightly more agnostic as it'll guarantee the correct sized array is returned. Both will work, but I'll revert this. | public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[0]);
} | return keySet.toArray(new String[0]); | public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[keySet.size()]);
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private final String context;
private Map<String, Object> properties = new LinkedHashMap<>();
private final String label;
private AppConfigurationProperties appConfigurationProperties;
private static ObjectMapper mapper = new ObjectMapper();
private HashMap<String, KeyVaultClient> keyVaultClients;
private ClientStore clients;
private KeyVaultCredentialProvider keyVaultCredentialProvider;
private SecretClientBuilderSetup keyVaultClientProvider;
private AppConfigurationProviderProperties appProperties;
private ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
@Override
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call
* {@code initFeatures} to update Feature Management, but make sure its done in the
* last {@code AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @throws IOException Thrown when processing key/value failed when reading feature
* flags
* @return Updated Feature Set from Property Source
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
settingSelector.setKeyFilter(".appconfig*");
List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName);
if (settings == null || features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) {
String entry = getKeyVaultEntry(setting.getValue());
if (entry != null) {
properties.put(key, entry);
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its
* entry in Key Vault.
*
* @param value {"uri":
* "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(String value) {
String secretValue = null;
try {
URI uri = null;
try {
JsonNode kvReference = mapper.readTree(value);
uri = new URI(kvReference.at("/uri").asText());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost())
.getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retrieving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
KeyVaultClient getKeyVaultClient(URI uri, String uriHost) {
return keyVaultClients.computeIfAbsent(uriHost, ignored ->
new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider));
}
/**
* Initializes Feature Management configurations. Only one
* {@code AppConfigurationPropertySource} can call this, and it needs to be done after
* the rest have run initProperties.
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
/**
* Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}.
*
* @param featureSet The parsed KeyValueItems will be added to this
* @param settings New items read in from Azure
* @param date Cache timestamp
* @throws IOException
*/
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
Object feature = createFeature(setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into
* properties.
* @return Feature created from KeyValueItem
* @throws IOException
*/
private Object createFeature(ConfigurationSetting item) throws IOException {
Feature feature = null;
if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) {
try {
String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class);
feature = new Feature(key, featureItem);
if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) {
return true;
} else if (!featureItem.getEnabled()) {
return false;
}
return feature;
} catch (IOException e) {
throw new IOException("Unable to parse Feature Management values from Azure.", e);
}
} else {
String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(),
item.getContentType());
throw new IOException(message);
}
}
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private final String context;
private Map<String, Object> properties = new LinkedHashMap<>();
private final String label;
private AppConfigurationProperties appConfigurationProperties;
private static ObjectMapper mapper = new ObjectMapper();
private HashMap<String, KeyVaultClient> keyVaultClients;
private ClientStore clients;
private KeyVaultCredentialProvider keyVaultCredentialProvider;
private SecretClientBuilderSetup keyVaultClientProvider;
private AppConfigurationProviderProperties appProperties;
private ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
@Override
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call
* {@code initFeatures} to update Feature Management, but make sure its done in the
* last {@code AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @throws IOException Thrown when processing key/value failed when reading feature
* flags
* @return Updated Feature Set from Property Source
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
settingSelector.setKeyFilter(".appconfig*");
List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName);
if (settings == null || features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) {
String entry = getKeyVaultEntry(setting.getValue());
if (entry != null) {
properties.put(key, entry);
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its
* entry in Key Vault.
*
* @param value {"uri":
* "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(String value) {
String secretValue = null;
try {
URI uri = null;
try {
JsonNode kvReference = mapper.readTree(value);
uri = new URI(kvReference.at("/uri").asText());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost())
.getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retrieving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
KeyVaultClient getKeyVaultClient(URI uri, String uriHost) {
return keyVaultClients.computeIfAbsent(uriHost, ignored ->
new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider));
}
/**
* Initializes Feature Management configurations. Only one
* {@code AppConfigurationPropertySource} can call this, and it needs to be done after
* the rest have run initProperties.
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
/**
* Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}.
*
* @param featureSet The parsed KeyValueItems will be added to this
* @param settings New items read in from Azure
* @param date Cache timestamp
* @throws IOException
*/
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
Object feature = createFeature(setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into
* properties.
* @return Feature created from KeyValueItem
* @throws IOException
*/
private Object createFeature(ConfigurationSetting item) throws IOException {
Feature feature = null;
if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) {
try {
String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class);
feature = new Feature(key, featureItem);
if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) {
return true;
} else if (!featureItem.getEnabled()) {
return false;
}
return feature;
} catch (IOException e) {
throw new IOException("Unable to parse Feature Management values from Azure.", e);
}
} else {
String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(),
item.getContentType());
throw new IOException(message);
}
}
} |
For reference: https://docs.oracle.com/javase/8/docs/api/java/util/AbstractCollection.html#toArray-T:A- | public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[0]);
} | return keySet.toArray(new String[0]); | public String[] getPropertyNames() {
Set<String> keySet = properties.keySet();
return keySet.toArray(new String[keySet.size()]);
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private final String context;
private Map<String, Object> properties = new LinkedHashMap<>();
private final String label;
private AppConfigurationProperties appConfigurationProperties;
private static ObjectMapper mapper = new ObjectMapper();
private HashMap<String, KeyVaultClient> keyVaultClients;
private ClientStore clients;
private KeyVaultCredentialProvider keyVaultCredentialProvider;
private SecretClientBuilderSetup keyVaultClientProvider;
private AppConfigurationProviderProperties appProperties;
private ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
@Override
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call
* {@code initFeatures} to update Feature Management, but make sure its done in the
* last {@code AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @throws IOException Thrown when processing key/value failed when reading feature
* flags
* @return Updated Feature Set from Property Source
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
settingSelector.setKeyFilter(".appconfig*");
List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName);
if (settings == null || features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) {
String entry = getKeyVaultEntry(setting.getValue());
if (entry != null) {
properties.put(key, entry);
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its
* entry in Key Vault.
*
* @param value {"uri":
* "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(String value) {
String secretValue = null;
try {
URI uri = null;
try {
JsonNode kvReference = mapper.readTree(value);
uri = new URI(kvReference.at("/uri").asText());
} catch (URISyntaxException e) {
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost())
.getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
LOGGER.error("Error Retrieving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
KeyVaultClient getKeyVaultClient(URI uri, String uriHost) {
return keyVaultClients.computeIfAbsent(uriHost, ignored ->
new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider));
}
/**
* Initializes Feature Management configurations. Only one
* {@code AppConfigurationPropertySource} can call this, and it needs to be done after
* the rest have run initProperties.
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
ObjectMapper featureMapper = new ObjectMapper();
featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE);
properties.put(FEATURE_MANAGEMENT_KEY,
featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class));
}
/**
* Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}.
*
* @param featureSet The parsed KeyValueItems will be added to this
* @param settings New items read in from Azure
* @param date Cache timestamp
* @throws IOException
*/
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
throws IOException {
for (ConfigurationSetting setting : settings) {
Object feature = createFeature(setting);
if (feature != null) {
featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature);
}
}
return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into
* properties.
* @return Feature created from KeyValueItem
* @throws IOException
*/
private Object createFeature(ConfigurationSetting item) throws IOException {
Feature feature = null;
if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) {
try {
String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class);
feature = new Feature(key, featureItem);
if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) {
return true;
} else if (!featureItem.getEnabled()) {
return false;
}
return feature;
} catch (IOException e) {
throw new IOException("Unable to parse Feature Management values from Azure.", e);
}
} else {
String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(),
item.getContentType());
throw new IOException(message);
}
}
} | class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> {
private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class);
private final String context;
private Map<String, Object> properties = new LinkedHashMap<>();
private final String label;
private AppConfigurationProperties appConfigurationProperties;
private static ObjectMapper mapper = new ObjectMapper();
private HashMap<String, KeyVaultClient> keyVaultClients;
private ClientStore clients;
private KeyVaultCredentialProvider keyVaultCredentialProvider;
private SecretClientBuilderSetup keyVaultClientProvider;
private AppConfigurationProviderProperties appProperties;
private ConfigStore configStore;
AppConfigurationPropertySource(String context, ConfigStore configStore, String label,
AppConfigurationProperties appConfigurationProperties, ClientStore clients,
AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider,
SecretClientBuilderSetup keyVaultClientProvider) {
super(context + configStore.getEndpoint() + "/" + label);
this.context = context;
this.configStore = configStore;
this.label = label;
this.appConfigurationProperties = appConfigurationProperties;
this.appProperties = appProperties;
this.keyVaultClients = new HashMap<String, KeyVaultClient>();
this.clients = clients;
this.keyVaultCredentialProvider = keyVaultCredentialProvider;
this.keyVaultClientProvider = keyVaultClientProvider;
}
@Override
@Override
public Object getProperty(String name) {
return properties.get(name);
}
/**
* <p>
* Gets settings from Azure/Cache to set as configurations. Updates the cache.
* </p>
*
* <p>
* <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call
* {@code initFeatures} to update Feature Management, but make sure its done in the
* last {@code AppConfigurationPropertySource}
* </p>
*
* @param featureSet The set of Feature Management Flags from various config stores.
* @throws IOException Thrown when processing key/value failed when reading feature
* flags
* @return Updated Feature Set from Property Source
*/
FeatureSet initProperties(FeatureSet featureSet) throws IOException {
String storeName = configStore.getEndpoint();
Date date = new Date();
SettingSelector settingSelector = new SettingSelector().setLabelFilter(label);
settingSelector.setKeyFilter(context + "*");
List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName);
settingSelector.setKeyFilter(".appconfig*");
List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName);
if (settings == null || features == null) {
throw new IOException("Unable to load properties from App Configuration Store.");
}
for (ConfigurationSetting setting : settings) {
String key = setting.getKey().trim().substring(context.length()).replace('/', '.');
if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) {
String entry = getKeyVaultEntry(setting.getValue());
if (entry != null) {
properties.put(key, entry);
}
} else {
properties.put(key, setting.getValue());
}
}
return addToFeatureSet(featureSet, features, date);
}
/**
* Given a Setting's Key Vault Reference stored in the Settings value, it will get its
* entry in Key Vault.
*
* @param value {"uri":
* "<your-vault-url>/secret/<secret>/<version>"}
* @return Key Vault Secret Value
*/
private String getKeyVaultEntry(String value) {
String secretValue = null;
try {
URI uri = null;
try {
// The setting value is a JSON document of the form {"uri": "<vault-url>/secrets/<name>/<version>"}.
JsonNode kvReference = mapper.readTree(value);
uri = new URI(kvReference.at("/uri").asText());
} catch (URISyntaxException e) {
// rethrowRuntimeException always throws, so execution never proceeds with a null uri.
LOGGER.error("Error Processing Key Vault Entry URI.");
ReflectionUtils.rethrowRuntimeException(e);
}
// One cached client per vault host; getSecret retries up to the configured limit.
KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost())
.getSecret(uri, appProperties.getMaxRetryTime());
if (secret == null) {
throw new IOException("No Key Vault Secret found for Reference.");
}
secretValue = secret.getValue();
} catch (RuntimeException | IOException e) {
// Rethrown unchecked; the method only returns normally with a resolved value.
LOGGER.error("Error Retrieving Key Vault Entry");
ReflectionUtils.rethrowRuntimeException(e);
}
return secretValue;
}
// Lazily creates and caches one KeyVaultClient per vault host; later lookups
// for the same host reuse the cached client instance.
KeyVaultClient getKeyVaultClient(URI uri, String uriHost) {
    return keyVaultClients.computeIfAbsent(
        uriHost,
        host -> new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider,
            keyVaultClientProvider));
}
/**
* Initializes Feature Management configurations. Only one
* {@code AppConfigurationPropertySource} can call this, and it needs to be done after
* the rest have run initProperties.
* @param featureSet Feature Flag info to be set to this property source.
*/
void initFeatures(FeatureSet featureSet) {
    // Serialize the feature flags with kebab-case keys, the format Feature
    // Management expects, then expose them under the well-known key.
    final ObjectMapper featureMapper = new ObjectMapper();
    featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE);
    final LinkedHashMap<?, ?> featureManagement =
        featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class);
    properties.put(FEATURE_MANAGEMENT_KEY, featureManagement);
}
/**
* Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}.
*
* @param featureSet The parsed KeyValueItems will be added to this
* @param settings New items read in from Azure
* @param date Cache timestamp
* @throws IOException
*/
private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date)
    throws IOException {
    // Parse each feature-flag setting and register it under its key with the
    // feature-flag prefix stripped off. Unparseable-but-tolerated entries
    // (createFeature returning null) are skipped.
    for (ConfigurationSetting featureSetting : settings) {
        Object parsedFeature = createFeature(featureSetting);
        if (parsedFeature == null) {
            continue;
        }
        String featureKey = featureSetting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
        featureSet.addFeature(featureKey, parsedFeature);
    }
    return featureSet;
}
/**
* Creates a {@code Feature} from a {@code KeyValueItem}
*
* @param item Used to create Features before being converted to be set into
* properties.
* @return Feature created from KeyValueItem
* @throws IOException
*/
private Object createFeature(ConfigurationSetting item) throws IOException {
    Feature feature = null;
    if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) {
        try {
            String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length());
            FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class);
            feature = new Feature(key, featureItem);
            // Disabled flags collapse to a plain false, regardless of filters.
            if (!featureItem.getEnabled()) {
                return false;
            }
            // Enabled flags with no filters collapse to a plain true; only
            // filter-gated flags keep the full Feature object.
            if (feature.getEnabledFor().isEmpty()) {
                return true;
            }
            return feature;
        } catch (IOException e) {
            throw new IOException("Unable to parse Feature Management values from Azure.", e);
        }
    } else {
        String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(),
            item.getContentType());
        throw new IOException(message);
    }
}
} |
Maybe reword the justification to: "Transitive changes from azure-core will be ignored." | public TransformationResult tryTransform(@Nullable E oldElement, @Nullable E newElement, Difference difference) {
// Only consider differences attributed to a new archive.
String newArchive = difference.attachments.get("newArchive");
String newArchiveRole = difference.attachments.get("newArchiveRole");
if (newArchive == null) {
return TransformationResult.keep();
}
// Only dependencies (supplementary archives), never the primary archive.
if (!SUPPLEMENTARY.equalsIgnoreCase(newArchiveRole)) {
return TransformationResult.keep();
}
// Restrict the rule to azure-core artifacts.
if (!CORE_ARCHIVE.matcher(newArchive).matches()) {
return TransformationResult.keep();
}
if (difference.criticality == Criticality.ERROR) {
// Downgrade breaking changes that merely leak in transitively from azure-core.
return TransformationResult.replaceWith(Difference.copy(difference)
.withCriticality(Criticality.DOCUMENTED)
.withJustification("Transitive changes from Core libraries should be ignored.")
.build())
} | .withJustification("Transitive changes from Core libraries should be ignored.") | public TransformationResult tryTransform(@Nullable E oldElement, @Nullable E newElement, Difference difference) {
// Only consider differences attributed to a new archive.
String newArchive = difference.attachments.get("newArchive");
String newArchiveRole = difference.attachments.get("newArchiveRole");
if (newArchive == null) {
return TransformationResult.keep();
}
// Only dependencies (supplementary archives), never the primary archive.
if (!SUPPLEMENTARY.equalsIgnoreCase(newArchiveRole)) {
return TransformationResult.keep();
}
// Restrict the rule to azure-core artifacts.
if (!CORE_ARCHIVE.matcher(newArchive).matches()) {
return TransformationResult.keep();
}
if (difference.criticality == Criticality.ERROR) {
// Drop transitive azure-core breaking changes entirely.
return TransformationResult.discard();
} else {
return TransformationResult.keep();
}
} | class TransitiveCoreChangesTransform<E extends Element<E>> extends BaseDifferenceTransform<E> {
private static final Pattern CORE_ARCHIVE = Pattern.compile("com\\.azure:azure-core:.*");
private static final String SUPPLEMENTARY = Archive.Role.SUPPLEMENTARY.toString();
@Override
public Pattern[] getDifferenceCodePatterns() {
// Match every difference code; the actual filtering happens in tryTransform.
return new Pattern[] { Pattern.compile(".*") };
}
@Override
public String getExtensionId() {
return "transitive-core-changes";
}
@Override
} | class TransitiveCoreChangesTransform<E extends Element<E>> extends BaseDifferenceTransform<E> {
private static final Pattern CORE_ARCHIVE = Pattern.compile("com\\.azure:azure-core:.*");
private static final String SUPPLEMENTARY = Archive.Role.SUPPLEMENTARY.toString();
@Override
public Pattern[] getDifferenceCodePatterns() {
return new Pattern[] { Pattern.compile(".*") };
}
@Override
public String getExtensionId() {
return "transitive-core-changes";
}
@Override
} |
Is the resetWriterIndex call necessary here? | public static byte[] encode(final UUID uuid) {
// A Guid serializes to 16 bytes (two longs).
final byte[] bytes = new byte[2 * Long.BYTES];
// wrappedBuffer() returns a buffer whose writerIndex is already at capacity;
// reset it so encode() writes from offset 0 instead of failing past the end.
encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex());
return bytes;
} | encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex()); | public static byte[] encode(final UUID uuid) {
// Allocate the 16-byte (two-long) Guid serialization target.
final byte[] bytes = new byte[2 * Long.BYTES];
// resetWriterIndex() is required: wrappedBuffer() positions the writerIndex
// at capacity, and the writes in encode() must start at offset 0.
encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex());
return bytes;
} | class RntbdUUID {
public static final UUID EMPTY = new UUID(0L, 0L);
private RntbdUUID() {
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final byte[] bytes) {
// Delegate to the ByteBuf overload over a zero-copy view of the array.
return decode(Unpooled.wrappedBuffer(bytes));
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final ByteBuf in) {
checkNotNull(in, "in");
if (in.readableBytes() < 2 * Long.BYTES) {
final String reason = Strings.lenientFormat("invalid frame length: %s", in.readableBytes());
throw new CorruptedFrameException(reason);
}
// The first three Guid fields (int, short, short) are serialized little-endian...
long mostSignificantBits = in.readUnsignedIntLE() << 32;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE());
// ...while the trailing 8 bytes are read big-endian, mirroring encode().
long leastSignificantBits = (0x000000000000FFFFL & in.readShort()) << (32 + 16);
for (int shift = 32 + 8; shift >= 0; shift -= 8) {
leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift;
}
return new UUID(mostSignificantBits, leastSignificantBits);
}
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @return a new byte array containing the encoded
*/
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @param out an output {@link ByteBuf}
*/
public static void encode(final UUID uuid, final ByteBuf out) {
// The first three Guid fields (int, short, short) are written little-endian...
final long mostSignificantBits = uuid.getMostSignificantBits();
out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32));
out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16));
out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL));
// ...while the trailing 8 bytes are written big-endian, mirroring decode().
final long leastSignificantBits = uuid.getLeastSignificantBits();
out.writeShort((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16)));
out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32));
out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL));
}
} | class RntbdUUID {
public static final UUID EMPTY = new UUID(0L, 0L);
private RntbdUUID() {
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final byte[] bytes) {
return decode(Unpooled.wrappedBuffer(bytes));
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final ByteBuf in) {
checkNotNull(in, "in");
if (in.readableBytes() < 2 * Long.BYTES) {
final String reason = Strings.lenientFormat("invalid frame length: %s", in.readableBytes());
throw new CorruptedFrameException(reason);
}
long mostSignificantBits = in.readUnsignedIntLE() << 32;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE());
long leastSignificantBits = (0x000000000000FFFFL & in.readShort()) << (32 + 16);
for (int shift = 32 + 8; shift >= 0; shift -= 8) {
leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift;
}
return new UUID(mostSignificantBits, leastSignificantBits);
}
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @return a new byte array containing the encoded
*/
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @param out an output {@link ByteBuf}
*/
public static void encode(final UUID uuid, final ByteBuf out) {
final long mostSignificantBits = uuid.getMostSignificantBits();
out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32));
out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16));
out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL));
final long leastSignificantBits = uuid.getLeastSignificantBits();
out.writeShort((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16)));
out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32));
out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL));
}
} |
Yes - the writerIndex is at the end of the byte[] after the wrappedBuffer call, so it must be reset before writing. | public static byte[] encode(final UUID uuid) {
final byte[] bytes = new byte[2 * Long.BYTES];
encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex());
return bytes;
} | encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex()); | public static byte[] encode(final UUID uuid) {
final byte[] bytes = new byte[2 * Long.BYTES];
encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex());
return bytes;
} | class RntbdUUID {
public static final UUID EMPTY = new UUID(0L, 0L);
private RntbdUUID() {
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final byte[] bytes) {
return decode(Unpooled.wrappedBuffer(bytes));
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final ByteBuf in) {
checkNotNull(in, "in");
if (in.readableBytes() < 2 * Long.BYTES) {
final String reason = Strings.lenientFormat("invalid frame length: %s", in.readableBytes());
throw new CorruptedFrameException(reason);
}
long mostSignificantBits = in.readUnsignedIntLE() << 32;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE());
long leastSignificantBits = (0x000000000000FFFFL & in.readShort()) << (32 + 16);
for (int shift = 32 + 8; shift >= 0; shift -= 8) {
leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift;
}
return new UUID(mostSignificantBits, leastSignificantBits);
}
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @return a new byte array containing the encoded
*/
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @param out an output {@link ByteBuf}
*/
public static void encode(final UUID uuid, final ByteBuf out) {
final long mostSignificantBits = uuid.getMostSignificantBits();
out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32));
out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16));
out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL));
final long leastSignificantBits = uuid.getLeastSignificantBits();
out.writeShort((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16)));
out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32));
out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL));
}
} | class RntbdUUID {
public static final UUID EMPTY = new UUID(0L, 0L);
private RntbdUUID() {
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final byte[] bytes) {
return decode(Unpooled.wrappedBuffer(bytes));
}
/**
* Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded
* @return a new {@link UUID}
*/
public static UUID decode(final ByteBuf in) {
checkNotNull(in, "in");
if (in.readableBytes() < 2 * Long.BYTES) {
final String reason = Strings.lenientFormat("invalid frame length: %s", in.readableBytes());
throw new CorruptedFrameException(reason);
}
long mostSignificantBits = in.readUnsignedIntLE() << 32;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16;
mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE());
long leastSignificantBits = (0x000000000000FFFFL & in.readShort()) << (32 + 16);
for (int shift = 32 + 8; shift >= 0; shift -= 8) {
leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift;
}
return new UUID(mostSignificantBits, leastSignificantBits);
}
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @return a new byte array containing the encoded
*/
/**
* Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray}
*
* @param uuid a {@link UUID} to be encoded
* @param out an output {@link ByteBuf}
*/
public static void encode(final UUID uuid, final ByteBuf out) {
final long mostSignificantBits = uuid.getMostSignificantBits();
out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32));
out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16));
out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL));
final long leastSignificantBits = uuid.getLeastSignificantBits();
out.writeShort((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16)));
out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32));
out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL));
}
} |
Are we allowed to have no hostname in a listener? Ideally, if we want to clear a hostname, the code should provide a `withoutHostname` method in the update flow. | public ApplicationGatewayListenerImpl withHostname(String hostname) {
this.innerModel().withHostname(null);
if (hostname == null) {
this.innerModel().withHostNames(null);
} else {
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
} | this.innerModel().withHostNames(null); | public ApplicationGatewayListenerImpl withHostname(String hostname) {
// A null hostname is ignored; clearing requires a dedicated API.
if (hostname != null) {
// Store the name in the plural hostNames field and blank the legacy singular field.
this.innerModel().withHostname(null);
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostname(null);
this.innerModel().withHostNames(hostnames);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} |
Do we need this test? | public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
String listenerName = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listenerName)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listenerName)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listenerName).hostname());
gateway.innerModel().httpListeners().iterator().next().withHostNames(null).withHostname(hostname1);
gateway.update()
.apply();
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
} | Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname()); | public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
String listener1 = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listener1)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listener1).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listener1)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listener1).hostname());
List<String> hostnames = new ArrayList<>();
hostnames.add(hostname1);
hostnames.add(hostname2);
gateway.update()
.updateListener(listener1)
.withHostnames(hostnames)
.parent()
.apply();
Assertions.assertEquals(hostnames, gateway.listeners().get(listener1).hostnames());
} | class ApplicationGatewayTests extends NetworkManagementTest {
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertTrue(appGateway != null);
Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier()));
Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size()));
Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2);
Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5);
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200);
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions
.assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64);
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(),
"RequestHeaderNames");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(),
"StartsWith");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent");
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(),
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION");
}
@Test
@Test
@Disabled("Need client id for key vault usage")
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
} | class ApplicationGatewayTests extends NetworkManagementTest {
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertTrue(appGateway != null);
Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier()));
Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size()));
Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2);
Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5);
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200);
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions
.assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64);
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(),
"RequestHeaderNames");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(),
"StartsWith");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent");
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(),
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION");
}
@Test
@Test
@Disabled("Need client id for key vault usage")
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
} |
It's for confirmation for service response of replacing `hostnames` with `hostname`. I wanted to try the other way around but it's much more complicated.. | public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
String listenerName = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listenerName)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listenerName)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listenerName).hostname());
gateway.innerModel().httpListeners().iterator().next().withHostNames(null).withHostname(hostname1);
gateway.update()
.apply();
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
} | Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname()); | public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
String listener1 = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listener1)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listener1).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listener1)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listener1).hostname());
List<String> hostnames = new ArrayList<>();
hostnames.add(hostname1);
hostnames.add(hostname2);
gateway.update()
.updateListener(listener1)
.withHostnames(hostnames)
.parent()
.apply();
Assertions.assertEquals(hostnames, gateway.listeners().get(listener1).hostnames());
} | class ApplicationGatewayTests extends NetworkManagementTest {
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertTrue(appGateway != null);
Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier()));
Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size()));
Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2);
Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5);
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200);
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions
.assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64);
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(),
"RequestHeaderNames");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(),
"StartsWith");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent");
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(),
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION");
}
@Test
@Test
@Disabled("Need client id for key vault usage")
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
} | class ApplicationGatewayTests extends NetworkManagementTest {
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertTrue(appGateway != null);
Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier()));
Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size()));
Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2);
Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5);
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200);
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions
.assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64);
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(),
"RequestHeaderNames");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(),
"StartsWith");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent");
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(),
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION");
}
@Test
@Test
@Disabled("Need client id for key vault usage")
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
} |
You should be able to replace this case with `withHostnames` if you support it? | public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
String listenerName = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listenerName)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listenerName)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listenerName).hostname());
gateway.innerModel().httpListeners().iterator().next().withHostNames(null).withHostname(hostname1);
gateway.update()
.apply();
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
} | Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname()); | public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
String listener1 = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listener1)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listener1).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listener1)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listener1).hostname());
List<String> hostnames = new ArrayList<>();
hostnames.add(hostname1);
hostnames.add(hostname2);
gateway.update()
.updateListener(listener1)
.withHostnames(hostnames)
.parent()
.apply();
Assertions.assertEquals(hostnames, gateway.listeners().get(listener1).hostnames());
} | class ApplicationGatewayTests extends NetworkManagementTest {
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertTrue(appGateway != null);
Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier()));
Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size()));
Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2);
Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5);
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200);
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions
.assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64);
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(),
"RequestHeaderNames");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(),
"StartsWith");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent");
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(),
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION");
}
@Test
@Test
@Disabled("Need client id for key vault usage")
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
} | class ApplicationGatewayTests extends NetworkManagementTest {
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertTrue(appGateway != null);
Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier()));
Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size()));
Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2);
Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5);
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200);
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions
.assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64);
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(),
"RequestHeaderNames");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(),
"StartsWith");
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent");
Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1);
Assertions
.assertEquals(
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(),
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION");
}
@Test
@Test
@Disabled("Need client id for key vault usage")
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String appPublicIp = generateRandomResourceName("pip", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip =
networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
} |
Move this line into `if (hostname != null) {`? | public ApplicationGatewayListenerImpl withHostname(String hostname) {
this.innerModel().withHostname(null);
if (hostname != null) {
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
} | this.innerModel().withHostname(null); | public ApplicationGatewayListenerImpl withHostname(String hostname) {
if (hostname != null) {
this.innerModel().withHostname(null);
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
this.innerModel().withHostname(null);
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostNames(hostnames);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostname(null);
this.innerModel().withHostNames(hostnames);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} |
Move this line into `if (hostname != null) {`? | public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
this.innerModel().withHostname(null);
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostNames(hostnames);
}
return this;
} | this.innerModel().withHostname(null); | public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostname(null);
this.innerModel().withHostNames(hostnames);
}
return this;
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHostname(String hostname) {
this.innerModel().withHostname(null);
if (hostname != null) {
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHostname(String hostname) {
if (hostname != null) {
this.innerModel().withHostname(null);
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} |
Moved, thanks... | public ApplicationGatewayListenerImpl withHostname(String hostname) {
this.innerModel().withHostname(null);
if (hostname != null) {
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
} | this.innerModel().withHostname(null); | public ApplicationGatewayListenerImpl withHostname(String hostname) {
if (hostname != null) {
this.innerModel().withHostname(null);
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
this.innerModel().withHostname(null);
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostNames(hostnames);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostname(null);
this.innerModel().withHostNames(hostnames);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} |
Moved. | public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
this.innerModel().withHostname(null);
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostNames(hostnames);
}
return this;
} | this.innerModel().withHostname(null); | public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) {
if (!CoreUtils.isNullOrEmpty(hostnames)) {
this.innerModel().withHostname(null);
this.innerModel().withHostNames(hostnames);
}
return this;
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHostname(String hostname) {
this.innerModel().withHostname(null);
if (hostname != null) {
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} | class ApplicationGatewayListenerImpl
extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway>
implements ApplicationGatewayListener,
ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>,
ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>,
ApplicationGatewayListener.Update {
ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) {
super(inner, parent);
}
@Override
public String networkId() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.networkId();
} else {
return null;
}
}
@Override
public String subnetName() {
ApplicationGatewayFrontend frontend = this.frontend();
if (frontend != null) {
return frontend.subnetName();
} else {
return null;
}
}
@Override
public boolean requiresServerNameIndication() {
if (this.innerModel().requireServerNameIndication() != null) {
return this.innerModel().requireServerNameIndication();
} else {
return false;
}
}
@Override
public String hostname() {
if (this.innerModel().hostname() != null) {
return this.innerModel().hostname();
}
if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return this.innerModel().hostNames().get(0);
}
return null;
}
@Override
public List<String> hostnames() {
if (this.innerModel().hostname() != null) {
return Collections.singletonList(this.innerModel().hostname());
}
if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) {
return Collections.emptyList();
}
return Collections.unmodifiableList(this.innerModel().hostNames());
}
@Override
public String publicIpAddressId() {
final ApplicationGatewayFrontend frontend = this.frontend();
if (frontend == null) {
return null;
} else {
return frontend.publicIpAddressId();
}
}
@Override
public PublicIpAddress getPublicIpAddress() {
return this.getPublicIpAddressAsync().block();
}
@Override
public Mono<PublicIpAddress> getPublicIpAddressAsync() {
String pipId = this.publicIpAddressId();
return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId);
}
@Override
public String name() {
return this.innerModel().name();
}
@Override
public ApplicationGatewaySslCertificate sslCertificate() {
SubResource certRef = this.innerModel().sslCertificate();
if (certRef == null) {
return null;
}
String name = ResourceUtils.nameFromResourceId(certRef.id());
return this.parent().sslCertificates().get(name);
}
@Override
public ApplicationGatewayProtocol protocol() {
return this.innerModel().protocol();
}
@Override
public int frontendPortNumber() {
String name = this.frontendPortName();
if (name == null) {
return 0;
} else if (!this.parent().frontendPorts().containsKey(name)) {
return 0;
} else {
return this.parent().frontendPorts().get(name);
}
}
@Override
public String frontendPortName() {
if (this.innerModel().frontendPort() != null) {
return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id());
} else {
return null;
}
}
@Override
public ApplicationGatewayFrontend frontend() {
final SubResource frontendInner = this.innerModel().frontendIpConfiguration();
if (frontendInner == null) {
return null;
} else {
final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id());
return this.parent().frontends().get(frontendName);
}
}
@Override
public ApplicationGatewayImpl attach() {
this.parent().withHttpListener(this);
return this.parent();
}
private ApplicationGatewayListenerImpl withFrontend(String name) {
SubResource frontendRef =
new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name);
this.innerModel().withFrontendIpConfiguration(frontendRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(String name) {
SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name);
this.innerModel().withFrontendPort(portRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) {
String portName = this.parent().frontendPortNameFromNumber(portNumber);
if (portName == null) {
portName = this.parent().manager().resourceManager().internalContext()
.randomResourceName("port", 9);
this.parent().withFrontendPort(portNumber, portName);
}
return this.withFrontendPort(portName);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificate(String name) {
SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name);
this.innerModel().withSslCertificate(certRef);
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) {
return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(
String keyVaultSecretId, String name) {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach();
return this;
}
@Override
public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException {
return withSslCertificateFromPfxFile(pfxFile, null);
}
private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException {
if (name == null) {
name = this.parent().manager().resourceManager().internalContext()
.randomResourceName("cert", 10);
}
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach();
return this.withSslCertificate(name);
}
@Override
public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) {
ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate();
if (sslCert != null) {
sslCert.withPfxPassword(password);
}
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttp() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHttps() {
this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS);
return this;
}
@Override
public ApplicationGatewayListenerImpl withHostname(String hostname) {
if (hostname != null) {
this.innerModel().withHostname(null);
List<String> hostNames = new ArrayList<>();
hostNames.add(hostname);
this.innerModel().withHostNames(hostNames);
}
return this;
}
@Override
@Override
public ApplicationGatewayListenerImpl withServerNameIndication() {
this.innerModel().withRequireServerNameIndication(true);
return this;
}
@Override
public ApplicationGatewayListenerImpl withoutServerNameIndication() {
this.innerModel().withRequireServerNameIndication(false);
return this;
}
@Override
public ApplicationGatewayListenerImpl withPrivateFrontend() {
this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name());
return this;
}
@Override
public ApplicationGatewayListenerImpl withPublicFrontend() {
this.withFrontend(this.parent().ensureDefaultPublicFrontend().name());
return this;
}
} |
I am not sure this will inject the codesnippet correctly. I think both `BEGIN` and the tagname have to be on the same line. Same with `END` below. Could you please verify that the javadocs are generated with this codesnippet correctly? | public static void main(String[] args) {
DeviceManagementClient deviceManagementClient =
new DeviceManagementClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint("contoso.api.adu.microsoft.com")
.instanceId("blue")
.buildClient();
RequestOptions requestOptions = new RequestOptions();
requestOptions.addQueryParam("action", "cancel");
Response<BinaryData> response =
deviceManagementClient.stopDeploymentWithResponse("TestGroup", "deploymentId", requestOptions);
} | public static void main(String[] args) {
DeviceManagementClient deviceManagementClient =
new DeviceManagementClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint("contoso.api.adu.microsoft.com")
.instanceId("blue")
.buildClient();
RequestOptions requestOptions = new RequestOptions();
requestOptions.addQueryParam("action", "cancel");
Response<BinaryData> response =
deviceManagementClient.stopDeploymentWithResponse("TestGroup", "deploymentId", requestOptions);
} | class DeviceManagementCancelOrRetryDeployment {
} | class DeviceManagementCancelOrRetryDeployment {
} | |
Ah, the line is too long and it get broken by the javaformat... One solution might be to limit the length of the tagname/identifier. Changing the javaformat for this particular case in a particular folder seems harder. | public static void main(String[] args) {
DeviceManagementClient deviceManagementClient =
new DeviceManagementClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint("contoso.api.adu.microsoft.com")
.instanceId("blue")
.buildClient();
RequestOptions requestOptions = new RequestOptions();
requestOptions.addQueryParam("action", "cancel");
Response<BinaryData> response =
deviceManagementClient.stopDeploymentWithResponse("TestGroup", "deploymentId", requestOptions);
} | public static void main(String[] args) {
DeviceManagementClient deviceManagementClient =
new DeviceManagementClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.endpoint("contoso.api.adu.microsoft.com")
.instanceId("blue")
.buildClient();
RequestOptions requestOptions = new RequestOptions();
requestOptions.addQueryParam("action", "cancel");
Response<BinaryData> response =
deviceManagementClient.stopDeploymentWithResponse("TestGroup", "deploymentId", requestOptions);
} | class DeviceManagementCancelOrRetryDeployment {
} | class DeviceManagementCancelOrRetryDeployment {
} | |
If prefetch is enabled and the message was not emitted, we should not increment this right? | private void drainQueue() {
if (isTerminated()) {
return;
}
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | numberConsumed++; | private void drainQueue() {
if (isTerminated()) {
return;
}
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicInteger wip = new AtomicInteger();
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
private final Object currentWorkLock = new Object();
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
private volatile SynchronousReceiveWork currentWork;
/**
* The number of requested messages.
*/
private volatile long requested;
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
* between the last terminated downstream and the next active downstream.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
SynchronousReceiveWork initialWork,
boolean isPrefetchDisabled,
Duration operationTimeout) {
this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
this.isPrefetchDisabled = isPrefetchDisabled;
if (initialWork.getNumberOfEvents() < 1) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
}
Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
if (!Operators.setOnce(UPSTREAM, this, subscription)) {
logger.warning("This should only be subscribed to once. Ignoring subscription.");
return;
}
getOrUpdateCurrentWork();
subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
if (isTerminated()) {
Operators.onNextDropped(message, Context.empty());
} else {
bufferMessages.add(message);
drain();
}
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
this.dispose();
}
private boolean isTerminated() {
if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
return true;
}
return isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return Gets or sets the next work item. Null if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
synchronized (currentWorkLock) {
if (currentWork != null && !currentWork.isTerminal()) {
return currentWork;
}
currentWork = workQueue.poll();
while (currentWork != null) {
if (currentWork.isTerminal()) {
REQUESTED.updateAndGet(this, currentRequest -> {
final int remainingEvents = currentWork.getRemainingEvents();
if (remainingEvents < 1) {
return currentRequest;
}
final long difference = currentRequest - remainingEvents;
logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
+ " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
difference);
return difference < 0 ? 0 : difference;
});
currentWork = workQueue.poll();
continue;
}
final SynchronousReceiveWork work = currentWork;
logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
work.getNumberOfEvents());
work.start();
requestUpstream(work.getNumberOfEvents());
return work;
}
return currentWork;
}
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
if (isTerminated()) {
logger.info("Cannot request more messages upstream. Subscriber is terminated.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.info("There is no upstream to request messages from.");
return;
}
final long currentRequested = REQUESTED.get(this);
final long difference = numberOfMessages - currentRequested;
logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
currentRequested, numberOfMessages, difference);
if (difference <= 0) {
return;
}
Operators.addCap(REQUESTED, this, difference);
subscription.request(difference);
}
@Override
public void dispose() {
super.dispose();
dispose("Upstream completed the receive work.", null);
}
private void dispose(String message, Throwable throwable) {
super.dispose();
if (isDisposed.getAndSet(true)) {
return;
}
synchronized (currentWorkLock) {
if (currentWork != null) {
currentWork.complete(message, throwable);
currentWork = null;
}
SynchronousReceiveWork w = workQueue.poll();
while (w != null) {
w.complete(message, throwable);
w = workQueue.poll();
}
}
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
return this.workQueue.size();
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicInteger wip = new AtomicInteger();
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
private final Object currentWorkLock = new Object();
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
private volatile SynchronousReceiveWork currentWork;
/**
* The number of requested messages.
*/
private volatile long requested;
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
* between the last terminated downstream and the next active downstream.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
SynchronousReceiveWork initialWork,
boolean isPrefetchDisabled,
Duration operationTimeout) {
this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
this.isPrefetchDisabled = isPrefetchDisabled;
if (initialWork.getNumberOfEvents() < 1) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
}
Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
if (!Operators.setOnce(UPSTREAM, this, subscription)) {
logger.warning("This should only be subscribed to once. Ignoring subscription.");
return;
}
getOrUpdateCurrentWork();
subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
if (isTerminated()) {
Operators.onNextDropped(message, Context.empty());
} else {
bufferMessages.add(message);
drain();
}
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
this.dispose();
}
private boolean isTerminated() {
if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
return true;
}
return isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return Gets or sets the next work item. Null if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
synchronized (currentWorkLock) {
if (currentWork != null && !currentWork.isTerminal()) {
return currentWork;
}
currentWork = workQueue.poll();
while (currentWork != null) {
if (currentWork.isTerminal()) {
REQUESTED.updateAndGet(this, currentRequest -> {
final int remainingEvents = currentWork.getRemainingEvents();
if (remainingEvents < 1) {
return currentRequest;
}
final long difference = currentRequest - remainingEvents;
logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
+ " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
difference);
return difference < 0 ? 0 : difference;
});
currentWork = workQueue.poll();
continue;
}
final SynchronousReceiveWork work = currentWork;
logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
work.getNumberOfEvents());
work.start();
requestUpstream(work.getNumberOfEvents());
return work;
}
return currentWork;
}
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
if (isTerminated()) {
logger.info("Cannot request more messages upstream. Subscriber is terminated.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.info("There is no upstream to request messages from.");
return;
}
final long currentRequested = REQUESTED.get(this);
final long difference = numberOfMessages - currentRequested;
logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
currentRequested, numberOfMessages, difference);
if (difference <= 0) {
return;
}
Operators.addCap(REQUESTED, this, difference);
subscription.request(difference);
}
@Override
public void dispose() {
super.dispose();
dispose("Upstream completed the receive work.", null);
}
private void dispose(String message, Throwable throwable) {
super.dispose();
if (isDisposed.getAndSet(true)) {
return;
}
synchronized (currentWorkLock) {
if (currentWork != null) {
currentWork.complete(message, throwable);
currentWork = null;
}
SynchronousReceiveWork w = workQueue.poll();
while (w != null) {
w.complete(message, throwable);
w = workQueue.poll();
}
}
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
return this.workQueue.size();
}
} |
Good catch, didn't realize, I was using Strong consistency. Will fix it. | public void queryDiagnosticsOnOrderBy() {
String containerId = "testcontainer";
cosmosAsyncDatabase.createContainer(containerId, "/mypk",
ThroughputProperties.createManualThroughput(40000)).block();
CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
testcontainer.createItem(getInternalObjectNode()).block();
options.setMaxDegreeOfParallelism(-1);
String query = "SELECT * from c ORDER BY c._ts DESC";
CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options,
InternalObjectNode.class);
AtomicInteger counterPkRid = new AtomicInteger();
AtomicInteger counterPartitionKeyRangeId = new AtomicInteger();
cosmosPagedFlux.byPage().flatMap(feedResponse -> {
String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
Pattern pattern = Pattern.compile("\"partitionKeyRangeId\":\"");
Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
while (matcher.find()) {
counterPartitionKeyRangeId.incrementAndGet();
}
pattern = Pattern.compile("pkrId:");
matcher = pattern.matcher(cosmosDiagnosticsString);
while (matcher.find()) {
counterPkRid.incrementAndGet();
}
return Flux.just(feedResponse);
}).blockLast();
assertThat(counterPkRid.get() * 2).isEqualTo(counterPartitionKeyRangeId.get());
deleteCollection(testcontainer);
} | assertThat(counterPkRid.get() * 2).isEqualTo(counterPartitionKeyRangeId.get()); | public void queryDiagnosticsOnOrderBy() {
String containerId = "testcontainer";
cosmosAsyncDatabase.createContainer(containerId, "/mypk",
ThroughputProperties.createManualThroughput(40000)).block();
CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
testcontainer.createItem(getInternalObjectNode()).block();
options.setMaxDegreeOfParallelism(-1);
String query = "SELECT * from c ORDER BY c._ts DESC";
CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options,
InternalObjectNode.class);
Set<String> partitionKeyRangeIds = new HashSet<>();
Set<String> pkRids = new HashSet<>();
cosmosPagedFlux.byPage().flatMap(feedResponse -> {
String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
while (matcher.find()) {
String group = matcher.group(2);
partitionKeyRangeIds.add(group);
}
pattern = Pattern.compile("(pkrId:)(\\d)");
matcher = pattern.matcher(cosmosDiagnosticsString);
while (matcher.find()) {
String group = matcher.group(2);
pkRids.add(group);
}
return Flux.just(feedResponse);
}).blockLast();
assertThat(pkRids).isNotEmpty();
assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
deleteCollection(testcontainer);
} | class CosmosDiagnosticsTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
private CosmosClient gatewayClient;
private CosmosClient directClient;
private CosmosAsyncDatabase cosmosAsyncDatabase;
private CosmosContainer container;
private CosmosAsyncContainer cosmosAsyncContainer;
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() {
assertThat(this.gatewayClient).isNull();
gatewayClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
directClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
if (this.gatewayClient != null) {
this.gatewayClient.close();
}
if (this.directClient != null) {
this.directClient.close();
}
}
@DataProvider(name = "query")
private Object[][] query() {
return new Object[][]{
new Object[] { "Select * from c where c.id = 'wrongId'", true },
new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true },
new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true },
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true },
new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true },
new Object[] { "Select * from c where c.id = 'wrongId'", false },
new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
new Object[] { "Select * from c where c.id = 'wrongId'", false },
new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
};
}
@DataProvider(name = "readAllItemsOfLogicalPartition")
private Object[][] readAllItemsOfLogicalPartition() {
return new Object[][]{
new Object[] { 1, true },
new Object[] { 5, null },
new Object[] { 20, null },
new Object[] { 1, false },
new Object[] { 5, false },
new Object[] { 20, false },
};
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnostics() throws Exception {
CosmosClient testGatewayClient = null;
try {
testGatewayClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
CosmosContainer container =
testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"operationType\":\"Create\"");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient());
isValidJSON(diagnostics);
} finally {
if (testGatewayClient != null) {
testGatewayClient.close();
}
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnosticsOnException() throws Exception {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
try {
createResponse = this.container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
CosmosItemResponse<InternalObjectNode> readResponse =
this.container.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
isValidJSON(exception.toString());
isValidJSON(exception.getMessage());
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"statusCode\":404");
assertThat(diagnostics).contains("\"operationType\":\"Read\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient());
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
isValidJSON(diagnostics);
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void systemDiagnosticsForSystemStateInformation() {
    // A successful create must surface host-level system state (memory, CPU)
    // in its diagnostics, along with the client identity and an activity id.
    InternalObjectNode item = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> response = this.container.createItem(item);
    String diagnosticsString = response.getDiagnostics().toString();
    String[] requiredFragments = {
        "systemInformation",
        "usedMemory",
        "availableMemory",
        "systemCpuLoad",
        "\"userAgent\":\"" + Utils.getUserAgent() + "\""
    };
    for (String fragment : requiredFragments) {
        assertThat(diagnosticsString).contains(fragment);
    }
    assertThat(diagnosticsString).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    assertThat(response.getDiagnostics().getDuration()).isNotNull();
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnostics() throws Exception {
// Builds a dedicated direct-mode client and verifies that a successful
// create produces the full direct (RNTBD) diagnostics payload, and that a
// conflicting create (409) still records backend latency and the
// transport request timeline.
CosmosClient testDirectClient = null;
try {
testDirectClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer cosmosContainer =
testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
// Direct mode: gatewayStatistics must be null, while the store-level and
// metadata (address-resolution) sections must all be populated.
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).contains("supplementalResponseStatisticsList");
assertThat(diagnostics).contains("\"gatewayStatistics\":null");
assertThat(diagnostics).contains("addressResolutionStatistics");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineDirect(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient());
isValidJSON(diagnostics);
// A duplicate create must fail with 409; its diagnostics must still carry
// backend latency and a complete transport timeline.
try {
cosmosContainer.createItem(internalObjectNode);
fail("expected 409");
} catch (CosmosException e) {
diagnostics = e.getDiagnostics().toString();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
validateTransportRequestTimelineDirect(e.getDiagnostics().toString());
}
} finally {
// Always release the dedicated client's connections and threads.
if (testDirectClient != null) {
testDirectClient.close();
}
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryPlanDiagnostics() throws JsonProcessingException {
// Verifies that query-plan timing information appears only in the FIRST
// page's diagnostics of a query (the plan is fetched once, up front) and
// is absent from every subsequent page.
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
List<String> itemIdList = new ArrayList<>();
// Seed 100 items; keep every 20th id to build an IN-list query below.
for(int i = 0; i< 100; i++) {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
if(i%20 == 0) {
itemIdList.add(internalObjectNode.getId());
}
}
String queryDiagnostics = null;
// Three shapes: full scan, multi-value IN on the partition key, and a
// query matching nothing.
List<String> queryList = new ArrayList<>();
queryList.add("Select * from c");
StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
for(int i = 0 ; i < itemIdList.size(); i++){
queryBuilder.append("'").append(itemIdList.get(i)).append("'");
if(i < (itemIdList.size()-1)) {
queryBuilder.append(",");
} else {
queryBuilder.append(")");
}
}
queryList.add(queryBuilder.toString());
queryList.add("Select * from c where c.id = 'wrongId'");
for(String query : queryList) {
int feedResponseCounter = 0;
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setQueryMetricsEnabled(true);
Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
if (feedResponseCounter == 0) {
// First page: plan timings plus a complete plan-request timeline.
assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline());
assertThat(requestTimeLine).contains("connectionConfigured");
assertThat(requestTimeLine).contains("requestSent");
assertThat(requestTimeLine).contains("transitTime");
assertThat(requestTimeLine).contains("received");
} else {
// Later pages: no query-plan section at all.
assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline =");
}
feedResponseCounter++;
}
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithIndexMetrics() {
// With index metrics enabled, responses carrying the INDEX_UTILIZATION
// header must expose a decodable (base64 JSON) index-utilization payload.
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
List<String> itemIdList = new ArrayList<>();
// Seed 100 items; keep every 20th id for the IN-list query below.
for(int i = 0; i< 100; i++) {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
if(i%20 == 0) {
itemIdList.add(internalObjectNode.getId());
}
}
String queryDiagnostics = null;
List<String> queryList = new ArrayList<>();
StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
for(int i = 0 ; i < itemIdList.size(); i++){
queryBuilder.append("'").append(itemIdList.get(i)).append("'");
if(i < (itemIdList.size()-1)) {
queryBuilder.append(",");
} else {
queryBuilder.append(")");
}
}
queryList.add(queryBuilder.toString());
for (String query : queryList) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setQueryMetricsEnabled(true);
options.setIndexMetricsEnabled(true);
Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
logger.info("This is query diagnostics {}", queryDiagnostics);
// The header is not guaranteed on every page, so only validate when present.
if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) {
assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull();
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull();
}
}
}
}
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
public void queryMetrics(String query, Boolean qmEnabled) {
// Data-driven check of direct-mode query diagnostics: metrics must appear
// iff enabled (null flag means the default, which is enabled), and the
// query-plan section appears on the relevant pages.
CosmosContainer directContainer =
this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
.getContainer(this.cosmosAsyncContainer.getId());
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
if (qmEnabled != null) {
options.setQueryMetricsEnabled(qmEnabled);
}
// For "group by" queries only the first page carries full diagnostics.
boolean qroupByFirstResponse = true;
Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options,
InternalObjectNode.class).iterableByPage().iterator();
assertThat(iterator.hasNext()).isTrue();
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
// The data-provider queries are expected to match no documents.
assertThat(feedResponse.getResults().size()).isEqualTo(0);
if (!query.contains("group by") || qroupByFirstResponse) {
validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
validateDirectModeQueryDiagnostics(queryDiagnostics);
if (query.contains("group by")) {
qroupByFirstResponse = false;
}
}
}
}
// Asserts the direct-mode-specific sections of a query diagnostics string:
// connection mode, store response statistics, null gateway statistics,
// address resolution, user agent and activity id.
// FIX: removed the stray @Test annotation that previously decorated this
// private helper — TestNG never executes private methods, so the annotation
// was dead code and misleadingly marked a validator as a test.
private void validateDirectModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
    assertThat(diagnostics).contains("supplementalResponseStatisticsList");
    assertThat(diagnostics).contains("responseStatisticsList");
    assertThat(diagnostics).contains("\"gatewayStatistics\":null");
    assertThat(diagnostics).contains("addressResolutionStatistics");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
// Asserts the gateway-mode-specific sections of a query diagnostics string:
// connection mode, populated gateway statistics, operation metadata, user
// agent, activity id and contacted regions.
private void validateGatewayModeQueryDiagnostics(String diagnostics) {
    String[] requiredFragments = {
        "\"connectionMode\":\"GATEWAY\"",
        "\"operationType\":\"Query\"",
        "\"userAgent\":\"" + Utils.getUserAgent() + "\"",
        "\"regionsContacted\""
    };
    for (String fragment : requiredFragments) {
        assertThat(diagnostics).contains(fragment);
    }
    // Gateway statistics must actually be populated, not serialized as null.
    assertThat(diagnostics).doesNotContain("\"gatewayStatistics\":null");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
    // Builds a dedicated gateway-mode client, seeds data, then verifies the
    // gateway-specific query diagnostics for each data-provider query.
    // FIX: the client was previously never closed (resource leak) and was
    // misleadingly named "testDirectClient" despite being gateway-mode.
    CosmosClient testGatewayClient = null;
    try {
        testGatewayClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .gatewayMode()
            .buildClient();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosContainer cosmosContainer = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId())
            .getContainer(cosmosAsyncContainer.getId());
        // Seed 100 items; keep every 20th id (ids are not used by the
        // data-provider queries but mirror the sibling tests' setup).
        List<String> itemIdList = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        // For "group by" queries only the first page carries full diagnostics.
        boolean qroupByFirstResponse = true;
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer
            .queryItems(query, options, InternalObjectNode.class)
            .iterableByPage()
            .iterator();
        assertThat(iterator.hasNext()).isTrue();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            // The data-provider queries are expected to match no documents.
            assertThat(feedResponse.getResults().size()).isEqualTo(0);
            if (!query.contains("group by") || qroupByFirstResponse) {
                validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
                validateGatewayModeQueryDiagnostics(queryDiagnostics);
                if (query.contains("group by")) {
                    qroupByFirstResponse = false;
                }
            }
        }
    } finally {
        // Always release the dedicated client's connections and threads.
        if (testGatewayClient != null) {
            testGatewayClient.close();
        }
    }
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithADifferentLocale() {
Locale.setDefault(Locale.GERMAN);
String query = "select * from root where root.id= \"someid\"";
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options,
InternalObjectNode.class)
.iterableByPage().iterator();
double requestCharge = 0;
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
requestCharge += feedResponse.getRequestCharge();
}
assertThat(requestCharge).isGreaterThan(0);
Locale.setDefault(Locale.ROOT);
}
// Asserts the presence or absence of query-metric and query-plan sections
// in a diagnostics string. A null qmEnabled means the client default, which
// is metrics enabled.
private static void validateQueryDiagnostics(
    String queryDiagnostics,
    Boolean qmEnabled,
    boolean expectQueryPlanDiagnostics) {
    boolean metricsExpected = qmEnabled == null || qmEnabled;
    String[] metricFragments = {
        "Retrieved Document Count",
        "Query Preparation Times",
        "Runtime Execution Times",
        "Partition Execution Timeline"
    };
    for (String fragment : metricFragments) {
        if (metricsExpected) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
    String[] planFragments = {
        "QueryPlan Start Time (UTC)=",
        "QueryPlan End Time (UTC)=",
        "QueryPlan Duration (ms)="
    };
    for (String fragment : planFragments) {
        if (expectQueryPlanDiagnostics) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
}
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
// readAllItems on a logical partition must page through every item (page
// size forced to 5) and emit query diagnostics WITHOUT a query-plan
// section, since no plan is fetched for a partition read-all.
String pkValue = UUID.randomUUID().toString();
for (int i = 0; i < expectedItemCount; i++) {
InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue);
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
}
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
if (qmEnabled != null) {
options = options.setQueryMetricsEnabled(qmEnabled);
}
// Force a small page size so multiple pages are exercised.
ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
Iterator<FeedResponse<InternalObjectNode>> iterator =
this.container
.readAllItems(
new PartitionKey(pkValue),
options,
InternalObjectNode.class)
.iterableByPage().iterator();
assertThat(iterator.hasNext()).isTrue();
int actualItemCount = 0;
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
actualItemCount += feedResponse.getResults().size();
// expectQueryPlanDiagnostics=false: read-all never fetches a query plan.
validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
}
assertThat(actualItemCount).isEqualTo(expectedItemCount);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnException() throws Exception {
// Verifies that a failed direct-mode point read (404 via wrong partition
// key) attaches complete diagnostics — including backend latency and the
// transport timeline — to the thrown CosmosException.
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
CosmosClient client = null;
try {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
// NOTE(review): the item is created through a freshly-built client but
// read back through the shared directClient's container — presumably to
// exercise a cold-cache read path; confirm this split is intentional.
CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
createResponse = container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
// Deliberately read with the wrong partition key so the service returns 404.
CosmosItemResponse<InternalObjectNode> readResponse =
cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
// The exception's string form and message embed the diagnostics and must
// both be valid JSON.
isValidJSON(exception.toString());
isValidJSON(exception.getMessage());
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
isValidJSON(diagnostics);
validateTransportRequestTimelineDirect(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient());
} finally {
// Always release the dedicated client's connections and threads.
if (client != null) {
client.close();
}
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnMetadataException() {
    // Swaps the gateway (metadata) HTTP client for a mock that always fails
    // with 400, so a direct-mode create hits a metadata-path error; then
    // validates the diagnostics attached to the resulting CosmosException.
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        // Every metadata HTTP call made through this mock fails with 400.
        HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
        Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class)))
            .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest")));
        // FIX: removed the redundant self-chained assignment
        // ("RxStoreModel x = x = ...") that was here previously.
        RxStoreModel rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper());
        ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient);
        container.createItem(internalObjectNode);
        fail("request should fail as bad request");
    } catch (CosmosException exception) {
        // The exception's string form and message embed the diagnostics and
        // must both be valid JSON.
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // The failure happened during collection metadata resolution.
        assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\"");
        assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        isValidJSON(diagnostics);
    } finally {
        // Always release the dedicated client's connections and threads.
        if (client != null) {
            client.close();
        }
    }
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void supplementalResponseStatisticsList() throws Exception {
    // The serialized supplementalResponseStatisticsList is capped at 10
    // entries even when more responses were recorded, and is emitted in full
    // when fewer than 10 were recorded. Also validates per-entry fields.
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null);
    // Record 15 Head responses: internal list holds all 15, JSON caps at 10.
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    ObjectMapper objectMapper = new ObjectMapper();
    String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = objectMapper.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    // After clearing, the internal list must be empty.
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    // Record 7 responses: below the cap, JSON emits all of them.
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    // FIX: reuse the existing ObjectMapper instead of re-instantiating it.
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
    for(JsonNode node : supplementalResponseStatisticsListNode) {
        assertThat(node.get("storeResult").asText()).isNotNull();
        // The recorded response time must be recent (within 5 seconds).
        String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
        Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
        assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
        assertThat(node.get("requestResponseTimeUTC")).isNotNull();
        // FIX: this assertion was previously duplicated on two lines.
        assertThat(node.get("requestOperationType")).isNotNull();
    }
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void serializationOnVariousScenarios() {
// Verifies which serializationType markers appear in diagnostics across
// database/container reads and item create/read flows — including that
// item deserialization is only recorded once getItem() is actually called.
CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
String diagnostics = cosmosDatabase.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
CosmosContainerResponse containerResponse = this.container.read();
diagnostics = containerResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
TestItem testItem = new TestItem();
testItem.id = "TestId";
testItem.mypk = "TestPk";
// Create without an explicit PK: the SDK must extract it (PK fetch
// serialization is recorded).
CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
testItem.id = "TestId2";
testItem.mypk = "TestPk";
// Create WITH an explicit PK: no PK fetch, and no item deserialization yet.
itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
// Accessing the item lazily triggers deserialization, which the
// diagnostics must now reflect.
TestItem readTestItem = itemResponse.getItem();
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
InternalObjectNode properties = readItemResponse.getItem();
diagnostics = readItemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
// Verifies RNTBD wire-level request/response length statistics for
// create (success and 409 conflict), read and delete operations.
TestItem testItem = new TestItem();
testItem.id = UUID.randomUUID().toString();
testItem.mypk = UUID.randomUUID().toString();
// Serialized item size — the expected request payload for writes.
int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
// Duplicate create: 409, request payload still sent, no response payload.
try {
container.createItem(testItem);
fail("expected to fail due to 409");
} catch (CosmosException e) {
validate(e.getDiagnostics(), testItemLength, 0);
}
// Read: empty request payload, non-empty response payload.
CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
// Delete: no payload in either direction.
CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
validate(deleteItemResponse.getDiagnostics(), 0, 0);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
// Exercises three spaced-out upserts on a fresh direct-mode client and
// validates the RNTBD service-endpoint statistics (endpoint creation time,
// last request time) fall inside the instants captured around each call.
Instant beforeClientInitialization = Instant.now();
CosmosClient client1 = null;
try {
client1 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.directMode()
.buildClient();
TestItem testItem = new TestItem();
testItem.id = UUID.randomUUID().toString();
testItem.mypk = UUID.randomUUID().toString();
int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
// Sleeps separate the operations so the recorded endpoint timestamps can
// be attributed unambiguously to a specific operation.
Thread.sleep(1000);
// Operation 1: first request initializes the RNTBD service endpoint.
Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
Instant afterInitializingRntbdServiceEndpoint = Instant.now();
Thread.sleep(1000);
Instant beforeOperation2 = Instant.now();
CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
Instant afterOperation2 = Instant.now();
Thread.sleep(1000);
Instant beforeOperation3 = Instant.now();
CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
Instant afterOperation3 = Instant.now();
// Operation 3's diagnostics report the endpoint created during operation
// 1 and the last (successful) request during operation 2.
validateRntbdStatistics(operation3Response.getDiagnostics(),
beforeClientInitialization,
beforeInitializingRntbdServiceEndpoint,
afterInitializingRntbdServiceEndpoint,
beforeOperation2,
afterOperation2,
beforeOperation3,
afterOperation3);
CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
validate(deleteItemResponse.getDiagnostics(), 0, 0);
} finally {
LifeCycleUtils.closeQuietly(client1);
}
}
// Parses the diagnostics JSON and asserts the RNTBD store-result and
// service-endpoint statistics: channel/queue counters, and that the
// endpoint's createdTime / lastRequestTime fall within the instants
// captured around the corresponding operations (with small clock-slack
// thresholds to absorb timestamp granularity).
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    // FIX: removed the unused local "hasPayload" that was computed here but
    // never read in this method.
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    // No concurrent requests in this test, so no channel should be acquired
    // beyond the single in-flight request being diagnosed.
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
    // Endpoint creation happened during operation 1 (first request).
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    // The last (successful) request recorded in operation 3's diagnostics is
    // operation 2 — diagnostics are snapshotted before op 3 completes.
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
}
// Asserts RNTBD wire-level size statistics in the first store result of the
// diagnostics: the on-wire lengths (headers included) strictly exceed the
// payload lengths, and the payload lengths match the expected values.
// The response payload is only checked when the request succeeded (no
// recorded exception), since error responses carry no payload.
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    // FIX: null-check BEFORE dereferencing — previously "exception" was read
    // from storeResult before the isNotNull assertion ran.
    assertThat(storeResult).isNotNull();
    boolean hasPayload = storeResult.get("exception").isNull();
    // FIX: removed a duplicated copy of the rntbdRequestLengthInBytes assert.
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
public void addressResolutionStatistics() {
// Validates addressResolutionStatistics in two scenarios: a healthy write
// (no error recorded), and a read whose address-resolution HTTP client is
// swapped (via reflection) for one that initially fails with connection
// refused — the retried resolution must record the error message.
CosmosClient client1 = null;
CosmosClient client2 = null;
String databaseId = DatabaseForTest.generateId();
String containerId = UUID.randomUUID().toString();
CosmosDatabase cosmosDatabase = null;
CosmosContainer cosmosContainer = null;
try {
client1 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
client1.createDatabase(databaseId);
cosmosDatabase = client1.getDatabase(databaseId);
cosmosDatabase.createContainer(containerId, "/mypk");
InternalObjectNode internalObjectNode = getInternalObjectNode();
cosmosContainer = cosmosDatabase.getContainer(containerId);
// Healthy path: resolution completes without in-flight state or errors.
CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" +
".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information");
// Second client: dig out its GatewayAddressCache via reflection and
// replace the HTTP client used for address resolution.
client2 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosDatabase = client2.getDatabase(databaseId);
cosmosContainer = cosmosDatabase.getContainer(containerId);
AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient,
"addressResolver", true);
@SuppressWarnings("rawtypes")
Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver,
"addressCacheByEndpoint",
true);
Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
// Install a failing HTTP client, then restore a working one from a
// background thread after 5s so the SDK's retry eventually succeeds.
// NOTE(review): this timing-based handoff assumes the read below takes
// longer than 5s to exhaust its resolution retries — confirm the retry
// policy still guarantees that.
HttpClient httpClient = httpClient(true);
FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
new Thread(() -> {
try {
Thread.sleep(5000);
HttpClient httpClient1 = httpClient(false);
FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
} catch (Exception e) {
fail(e.getMessage());
}
}).start();
PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
CosmosItemResponse<InternalObjectNode> readResourceResponse =
cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
InternalObjectNode.class);
// The eventually-successful read must still record the connection-refused
// failure from the earlier resolution attempts.
assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" +
".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
} catch (Exception ex) {
logger.error("Error in test addressResolutionStatistics", ex);
fail("This test should not throw exception " + ex);
} finally {
// Clean up the test database and both dedicated clients.
safeDeleteSyncDatabase(cosmosDatabase);
if (client1 != null) {
client1.close();
}
if (client2 != null) {
client2.close();
}
}
}
/** Creates a test document whose id and "mypk" partition-key value share one random UUID. */
private InternalObjectNode getInternalObjectNode() {
    String documentId = UUID.randomUUID().toString();
    InternalObjectNode node = new InternalObjectNode();
    node.setId(documentId);
    BridgeInternal.setProperty(node, "mypk", documentId);
    return node;
}
/** Creates a test document with a random id and the supplied "mypk" partition-key value. */
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode node = new InternalObjectNode();
    node.setId(UUID.randomUUID().toString());
    BridgeInternal.setProperty(node, "mypk", pkValue);
    return node;
}
/**
 * Reads the private "supplementalResponseStatisticsList" field via reflection;
 * the public API does not expose this state, so tests must reach in directly.
 */
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field statisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    statisticsField.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> statistics =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) statisticsField.get(requestStatistics);
    return statistics;
}
/** Resets the private supplemental statistics list to a fresh empty list via reflection. */
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field statisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    statisticsField.setAccessible(true);
    statisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}
/**
 * Asserts that gateway-mode diagnostics contain every expected transport
 * timeline event. Fails the calling test (via AssertJ) if one is missing.
 *
 * @param diagnostics the CosmosDiagnostics JSON string to inspect
 */
private void validateTransportRequestTimelineGateway(String diagnostics) {
    // The "connectionConfigured" check was duplicated in the original (copy-paste); once suffices.
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}
/**
 * Asserts that direct-mode diagnostics contain the full transport timeline
 * (all lifecycle events plus the start-time and duration fields).
 *
 * @param diagnostics the CosmosDiagnostics JSON string to inspect
 */
private void validateTransportRequestTimelineDirect(String diagnostics) {
    String[] expectedEvents = {
        "created", "queued", "channelAcquisitionStarted", "pipelined",
        "transitTime", "received", "completed"
    };
    for (String eventName : expectedEvents) {
        assertThat(diagnostics).contains("\"eventName\":\"" + eventName + "\"");
    }
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMicroSec\"");
}
/**
 * Fails the current test if {@code json} is not well-formed JSON.
 * Walking every token forces a full parse; malformed input throws.
 * The parser is now closed via try-with-resources (the original leaked it).
 *
 * @param json the candidate JSON text
 */
public void isValidJSON(final String json) {
    try (JsonParser parser = new ObjectMapper().createParser(json)) {
        while (parser.nextToken() != null) {
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}
/**
 * Builds a fixed HttpClient. When {@code fakeProxy} is true the client is routed
 * through localhost:8888 — a proxy that is expected not to exist — so requests
 * fail with connection errors for the negative-path tests.
 */
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig config = new HttpClientConfig(new Configs());
    if (fakeProxy) {
        config = config.withProxy(
            new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    }
    return HttpClient.createFixed(config);
}
/**
 * Deserializes an index-utilization JSON payload into IndexUtilizationInfo.
 * Returns null when the payload cannot be parsed; the failure is logged.
 */
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    try {
        return new ObjectMapper().readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
        return null;
    }
}
/**
 * Asserts that the diagnostics report exactly one contacted region and that it
 * matches the first available write region cached in the client's LocationCache.
 * Reaches through several private fields/classes via reflection because this
 * state has no public accessor.
 */
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
    RxDocumentClientImpl rxDocumentClient =
        (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
    GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);

    Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
    locationInfoField.setAccessible(true);
    Object locationInfo = locationInfoField.get(locationCache);

    // DatabaseAccountLocationsInfo is a private nested class, so it must be loaded by name.
    Class<?> locationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
        ".LocationCache$DatabaseAccountLocationsInfo");
    Field writeEndpointsField = locationsInfoClass.getDeclaredField(
        "availableWriteEndpointByLocation");
    writeEndpointsField.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, URI> writeEndpointsByRegion = (Map<String, URI>) writeEndpointsField.get(locationInfo);

    String expectedRegion = writeEndpointsByRegion.keySet().iterator().next();
    assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
    assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(expectedRegion.toLowerCase());
}
// Minimal POJO used as a typed item payload in these tests. Public fields map
// directly to the JSON properties "id" and "mypk" (the container partition key).
public static class TestItem {
public String id;
public String mypk;
// No-arg constructor is required for Jackson deserialization.
public TestItem() {
}
}
} | class CosmosDiagnosticsTest extends TestSuiteBase {
// Shared, reusable JSON mapper for serializing diagnostics objects in assertions.
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
// NOTE(review): appears unused in the methods visible here — confirm before removing.
private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
// One sync client per connection mode; both resolve the same shared container.
private CosmosClient gatewayClient;
private CosmosClient directClient;
private CosmosAsyncDatabase cosmosAsyncDatabase;
// Sync and async handles to the shared multi-partition test container.
private CosmosContainer container;
private CosmosAsyncContainer cosmosAsyncContainer;
/**
 * Builds one gateway-mode and one direct-mode client against the test account
 * and resolves sync/async handles to the shared multi-partition container.
 * Runs once per class; {@link #afterClass()} closes both clients.
 */
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() {
// Guards against double initialization if TestNG re-invokes setup.
assertThat(this.gatewayClient).isNull();
gatewayClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
directClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
// The sync "container" field is resolved through the gateway client.
container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
}
/** Closes both test clients after the suite; each close is null-guarded. */
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    CosmosClient[] clients = {this.gatewayClient, this.directClient};
    for (CosmosClient client : clients) {
        if (client != null) {
            client.close();
        }
    }
}
// Each row: { query text, qmEnabled flag for CosmosQueryRequestOptions }.
// All queries intentionally match zero documents.
@DataProvider(name = "query")
private Object[][] query() {
return new Object[][]{
new Object[] { "Select * from c where c.id = 'wrongId'", true },
new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true },
new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true },
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true },
new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true },
new Object[] { "Select * from c where c.id = 'wrongId'", false },
new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
// NOTE(review): the five rows below duplicate the five qmEnabled=false rows
// above exactly — possibly intentional repetition, but confirm; otherwise
// they only double the execution time of every test using this provider.
new Object[] { "Select * from c where c.id = 'wrongId'", false },
new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
};
}
// Each row: { number of items to seed in one logical partition,
//             qmEnabled (null = leave query-metrics at its default) }.
@DataProvider(name = "readAllItemsOfLogicalPartition")
private Object[][] readAllItemsOfLogicalPartition() {
return new Object[][]{
new Object[] { 1, true },
new Object[] { 5, null },
new Object[] { 20, null },
new Object[] { 1, false },
new Object[] { 5, false },
new Object[] { 20, false },
};
}
/**
 * Creates an item through a dedicated gateway-mode client and checks the
 * diagnostics string for the expected gateway markers (connection mode,
 * gateway statistics, metadata lookups, user agent, activity id), a valid
 * transport timeline, a contacted-region match, and well-formed JSON.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnostics() throws Exception {
CosmosClient testGatewayClient = null;
try {
testGatewayClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
// Local intentionally shadows the class-level "container" field: this one
// belongs to the throwaway client created above.
CosmosContainer container =
testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"operationType\":\"Create\"");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient());
isValidJSON(diagnostics);
} finally {
if (testGatewayClient != null) {
testGatewayClient.close();
}
}
}
/**
 * Creates an item, then reads it back with a deliberately wrong partition key
 * through the gateway client. The resulting 404 CosmosException must carry
 * valid-JSON diagnostics with gateway markers, the failing status code, and
 * a non-null resource address.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnosticsOnException() throws Exception {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
try {
createResponse = this.container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
// NOTE(review): readResponse is never used; the call exists only to throw.
CosmosItemResponse<InternalObjectNode> readResponse =
this.container.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
isValidJSON(exception.toString());
isValidJSON(exception.getMessage());
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"statusCode\":404");
assertThat(diagnostics).contains("\"operationType\":\"Read\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
// createResponse is safe to dereference here only because createItem succeeded
// before the failing read — the exception can only come from readItem.
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient());
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
isValidJSON(diagnostics);
}
}
/**
 * Verifies that diagnostics embed the host/system snapshot (memory and CPU
 * fields) along with the standard user-agent, activity-id and duration data.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void systemDiagnosticsForSystemStateInformation() {
    InternalObjectNode item = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(item);
    String diagnostics = createResponse.getDiagnostics().toString();
    String[] systemFragments = {
        "systemInformation", "usedMemory", "availableMemory", "systemCpuLoad"
    };
    for (String fragment : systemFragments) {
        assertThat(diagnostics).contains(fragment);
    }
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
}
/**
 * Creates an item through a dedicated direct-mode client and checks the
 * diagnostics for direct markers (connection mode, store response statistics,
 * address resolution, metadata lookups, backend latency), a full transport
 * timeline, and a contacted-region match. Then re-creates the same item to
 * force a 409 and asserts the exception's diagnostics also carry a timeline.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnostics() throws Exception {
CosmosClient testDirectClient = null;
try {
testDirectClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer cosmosContainer =
testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).contains("supplementalResponseStatisticsList");
// In direct mode the gateway statistics section must be explicitly null.
assertThat(diagnostics).contains("\"gatewayStatistics\":null");
assertThat(diagnostics).contains("addressResolutionStatistics");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineDirect(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient());
isValidJSON(diagnostics);
// Creating the same id again must conflict; the 409's diagnostics are also checked.
try {
cosmosContainer.createItem(internalObjectNode);
fail("expected 409");
} catch (CosmosException e) {
diagnostics = e.getDiagnostics().toString();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
validateTransportRequestTimelineDirect(e.getDiagnostics().toString());
}
} finally {
if (testDirectClient != null) {
testDirectClient.close();
}
}
}
/**
 * Seeds 100 documents, then runs three queries (select-all, a large IN list,
 * and a no-match query) page by page. Only the first page of each query may
 * carry the query-plan timings and its gateway request timeline; later pages
 * must not repeat them.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryPlanDiagnostics() throws JsonProcessingException {
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
List<String> itemIdList = new ArrayList<>();
for(int i = 0; i< 100; i++) {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
// Remember every 20th id to build the IN-list query below.
if(i%20 == 0) {
itemIdList.add(internalObjectNode.getId());
}
}
String queryDiagnostics = null;
List<String> queryList = new ArrayList<>();
queryList.add("Select * from c");
// Build "SELECT * from c where c.mypk in ('id1','id2',...)" from the sampled ids.
StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
for(int i = 0 ; i < itemIdList.size(); i++){
queryBuilder.append("'").append(itemIdList.get(i)).append("'");
if(i < (itemIdList.size()-1)) {
queryBuilder.append(",");
} else {
queryBuilder.append(")");
}
}
queryList.add(queryBuilder.toString());
queryList.add("Select * from c where c.id = 'wrongId'");
for(String query : queryList) {
int feedResponseCounter = 0;
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setQueryMetricsEnabled(true);
Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
if (feedResponseCounter == 0) {
// The query plan is fetched once, so only page 0 carries its timings/timeline.
assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline());
assertThat(requestTimeLine).contains("connectionConfigured");
assertThat(requestTimeLine).contains("requestSent");
assertThat(requestTimeLine).contains("transitTime");
assertThat(requestTimeLine).contains("received");
} else {
assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline =");
}
feedResponseCounter++;
}
}
}
/**
 * Seeds 100 documents and runs an IN-list query with index metrics enabled.
 * Whenever a page carries the INDEX_UTILIZATION response header, its
 * base64-decoded payload must deserialize into IndexUtilizationInfo with a
 * non-null utilized-single-indexes list.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithIndexMetrics() {
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
List<String> itemIdList = new ArrayList<>();
for(int i = 0; i< 100; i++) {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
// Every 20th id seeds the IN-list query below.
if(i%20 == 0) {
itemIdList.add(internalObjectNode.getId());
}
}
String queryDiagnostics = null;
List<String> queryList = new ArrayList<>();
StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
for(int i = 0 ; i < itemIdList.size(); i++){
queryBuilder.append("'").append(itemIdList.get(i)).append("'");
if(i < (itemIdList.size()-1)) {
queryBuilder.append(",");
} else {
queryBuilder.append(")");
}
}
queryList.add(queryBuilder.toString());
for (String query : queryList) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setQueryMetricsEnabled(true);
options.setIndexMetricsEnabled(true);
Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
logger.info("This is query diagnostics {}", queryDiagnostics);
// The header is optional per page; only validate when present.
if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) {
assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull();
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull();
}
}
}
}
/**
 * Runs {@code query} against the direct-mode container and validates the
 * query-metrics and query-plan sections of each page's diagnostics.
 *
 * @param query the query text (expected to match zero documents)
 * @param qmEnabled explicit metrics toggle; null leaves the client default
 */
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
public void queryMetrics(String query, Boolean qmEnabled) {
    CosmosContainer directContainer =
        this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
            .getContainer(this.cosmosAsyncContainer.getId());
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options.setQueryMetricsEnabled(qmEnabled);
    }
    // "group by" queries only carry full diagnostics on their first page.
    // (Local renamed from the misspelled "qroupByFirstResponse".)
    boolean groupByFirstResponse = true;
    Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options,
        InternalObjectNode.class).iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        assertThat(feedResponse.getResults().size()).isEqualTo(0);
        if (!query.contains("group by") || groupByFirstResponse) {
            validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
            validateDirectModeQueryDiagnostics(queryDiagnostics);
            if (query.contains("group by")) {
                groupByFirstResponse = false;
            }
        }
    }
}
/**
 * Asserts the direct-mode markers in query diagnostics. This is a plain helper,
 * not a test: the stray {@code @Test} annotation was removed because TestNG
 * cannot invoke a private method that requires a String argument — the
 * annotation was evidently a copy-paste mistake from the methods above.
 *
 * @param diagnostics the CosmosDiagnostics JSON string to inspect
 */
private void validateDirectModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
    assertThat(diagnostics).contains("supplementalResponseStatisticsList");
    assertThat(diagnostics).contains("responseStatisticsList");
    assertThat(diagnostics).contains("\"gatewayStatistics\":null");
    assertThat(diagnostics).contains("addressResolutionStatistics");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Asserts the gateway-mode markers (connection mode, non-null gateway
 * statistics, Query operation type, user agent, activity id, contacted
 * regions) in query diagnostics.
 *
 * @param diagnostics the CosmosDiagnostics JSON string to inspect
 */
private void validateGatewayModeQueryDiagnostics(String diagnostics) {
    String[] requiredFragments = {
        "\"connectionMode\":\"GATEWAY\"",
        "\"operationType\":\"Query\"",
        "\"userAgent\":\"" + Utils.getUserAgent() + "\"",
        "\"regionsContacted\""
    };
    for (String fragment : requiredFragments) {
        assertThat(diagnostics).contains(fragment);
    }
    assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Gateway-mode variant of the query-diagnostics test: seeds 100 documents,
 * runs {@code query} page by page, and validates the gateway diagnostic
 * markers. The client is now closed in a finally block (the original leaked
 * it on every data-provider row), and the local was renamed from
 * "testDirectClient" since it is built in gateway mode.
 *
 * @param query the query text (expected to match zero documents)
 * @param qmEnabled explicit metrics toggle; null leaves the client default
 */
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
    CosmosClient testGatewayClient = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .gatewayMode()
        .buildClient();
    try {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosContainer cosmosContainer = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId())
            .getContainer(cosmosAsyncContainer.getId());
        // Seed documents; every 20th id is sampled (kept for parity with sibling tests).
        List<String> itemIdList = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        // "group by" queries only carry full diagnostics on their first page.
        boolean groupByFirstResponse = true;
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer
            .queryItems(query, options, InternalObjectNode.class)
            .iterableByPage()
            .iterator();
        assertThat(iterator.hasNext()).isTrue();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            assertThat(feedResponse.getResults().size()).isEqualTo(0);
            if (!query.contains("group by") || groupByFirstResponse) {
                validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
                validateGatewayModeQueryDiagnostics(queryDiagnostics);
                if (query.contains("group by")) {
                    groupByFirstResponse = false;
                }
            }
        }
    } finally {
        testGatewayClient.close();
    }
}
/**
 * Ensures request-charge parsing works under a non-ROOT default locale
 * (German uses a decimal comma). The previous default locale is captured and
 * restored in a finally block so a failing assertion can no longer leak
 * Locale.GERMAN into later tests — the original reset the locale only on the
 * success path, and unconditionally to Locale.ROOT rather than the prior value.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithADifferentLocale() {
    Locale previousDefault = Locale.getDefault();
    Locale.setDefault(Locale.GERMAN);
    try {
        String query = "select * from root where root.id= \"someid\"";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options,
            InternalObjectNode.class)
            .iterableByPage().iterator();
        double requestCharge = 0;
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            requestCharge += feedResponse.getRequestCharge();
        }
        assertThat(requestCharge).isGreaterThan(0);
    } finally {
        Locale.setDefault(previousDefault);
    }
}
/**
 * Shared assertions for the query-metrics and query-plan sections of a
 * diagnostics string.
 *
 * @param queryDiagnostics the diagnostics text to check
 * @param qmEnabled null or true means the query-metrics lines must be present;
 *        false means they must be absent
 * @param expectQueryPlanDiagnostics whether the query-plan timing lines must appear
 */
private static void validateQueryDiagnostics(
    String queryDiagnostics,
    Boolean qmEnabled,
    boolean expectQueryPlanDiagnostics) {
    boolean metricsExpected = qmEnabled == null || qmEnabled;
    String[] metricFragments = {
        "Retrieved Document Count",
        "Query Preparation Times",
        "Runtime Execution Times",
        "Partition Execution Timeline"
    };
    for (String fragment : metricFragments) {
        if (metricsExpected) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
    String[] planFragments = {
        "QueryPlan Start Time (UTC)=",
        "QueryPlan End Time (UTC)=",
        "QueryPlan Duration (ms)="
    };
    for (String fragment : planFragments) {
        if (expectQueryPlanDiagnostics) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
}
/**
 * Seeds {@code expectedItemCount} documents into one logical partition, reads
 * them all back with a page size of 5, validates per-page metrics diagnostics
 * (no query plan is expected for readAll), and checks the total item count.
 *
 * @param expectedItemCount number of documents to seed and expect back
 * @param qmEnabled explicit metrics toggle; null leaves the client default
 */
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
String pkValue = UUID.randomUUID().toString();
for (int i = 0; i < expectedItemCount; i++) {
InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue);
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
}
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
if (qmEnabled != null) {
options = options.setQueryMetricsEnabled(qmEnabled);
}
// Small page size forces multiple pages for the larger seed counts.
ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
Iterator<FeedResponse<InternalObjectNode>> iterator =
this.container
.readAllItems(
new PartitionKey(pkValue),
options,
InternalObjectNode.class)
.iterableByPage().iterator();
assertThat(iterator.hasNext()).isTrue();
int actualItemCount = 0;
while (iterator.hasNext()) {
FeedResponse<InternalObjectNode> feedResponse = iterator.next();
String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
actualItemCount += feedResponse.getResults().size();
// readAll issues no query plan, hence expectQueryPlanDiagnostics=false.
validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
}
assertThat(actualItemCount).isEqualTo(expectedItemCount);
}
/**
 * Creates an item through a throwaway direct-mode client, then reads it back
 * with a wrong partition key. The 404 CosmosException must carry valid-JSON
 * direct-mode diagnostics including backend latency and a transport timeline.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnException() throws Exception {
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
CosmosClient client = null;
try {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
createResponse = container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
// NOTE(review): readResponse is never used; the call exists only to throw.
CosmosItemResponse<InternalObjectNode> readResponse =
cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
isValidJSON(exception.toString());
isValidJSON(exception.getMessage());
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
isValidJSON(diagnostics);
validateTransportRequestTimelineDirect(diagnostics);
// NOTE(review): createResponse is only non-null because createItem precedes
// the failing read; if createItem itself ever threw, this would NPE — confirm
// whether that is an acceptable failure mode here.
validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient());
} finally {
if (client != null) {
client.close();
}
}
}
/**
 * Forces the metadata (gateway) path to fail with a 400 by swapping a mocked
 * HttpClient into the gateway proxy, then verifies the exception's diagnostics
 * identify a direct-mode DocumentCollection request. Also fixes the redundant
 * double assignment "rxGatewayStoreModel = rxGatewayStoreModel = ..." from
 * the original.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnMetadataException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        // Any metadata call routed through the gateway proxy now fails with a 400.
        HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
        Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class)))
            .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest")));
        RxStoreModel rxGatewayStoreModel =
            ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper());
        ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient);
        container.createItem(internalObjectNode);
        fail("request should fail as bad request");
    } catch (CosmosException exception) {
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // The failure happens during collection metadata lookup, not the data operation.
        assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\"");
        assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        isValidJSON(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void supplementalResponseStatisticsList() throws Exception {
    // Record 15 supplemental (Head) responses; serialization is expected to cap
    // the emitted list at 10 entries while the in-memory list keeps all 15.
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(), null);
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    ObjectMapper objectMapper = new ObjectMapper();
    String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = objectMapper.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    // Reset and verify the cleared state round-trips as empty.
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    // Below the cap (7 < 10) the serialized list matches the in-memory list.
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    // Reuse the existing ObjectMapper; the original needlessly allocated a second one.
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
    for (JsonNode node : supplementalResponseStatisticsListNode) {
        assertThat(node.get("storeResult").asText()).isNotNull();
        // Each entry's timestamp must parse with the diagnostics formatter and be
        // recent (within 5 seconds of now).
        String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
        Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
        assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
        assertThat(node.get("requestResponseTimeUTC")).isNotNull();
        // Fix: the original asserted "requestOperationType" twice; once suffices.
        assertThat(node.get("requestOperationType")).isNotNull();
    }
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
// Verifies that each operation type records the expected serializationType
// entry (or its absence) in the response diagnostics.
public void serializationOnVariousScenarios() {
// Database read records DATABASE_DESERIALIZATION.
CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
String diagnostics = cosmosDatabase.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
// Container read records CONTAINER_DESERIALIZATION.
CosmosContainerResponse containerResponse = this.container.read();
diagnostics = containerResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
// Creating an item WITHOUT an explicit partition key records
// PARTITION_KEY_FETCH_SERIALIZATION (the pk value must be extracted).
TestItem testItem = new TestItem();
testItem.id = "TestId";
testItem.mypk = "TestPk";
CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
// With an explicit PartitionKey neither the pk fetch nor the item
// deserialization should have happened yet.
testItem.id = "TestId2";
testItem.mypk = "TestPk";
itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
// Accessing the item afterwards makes ITEM_DESERIALIZATION appear in the
// SAME response's diagnostics — deserialization is lazy.
TestItem readTestItem = itemResponse.getItem();
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
// Point read: item deserialization plus userAgent and activityId metadata.
CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
InternalObjectNode properties = readItemResponse.getItem();
diagnostics = readItemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
// Verifies requestPayloadLengthInBytes / responsePayloadLengthInBytes in
// direct-mode diagnostics for create (success and 409), read and delete.
public void rntbdRequestResponseLengthStatistics() throws Exception {
TestItem testItem = new TestItem();
testItem.id = UUID.randomUUID().toString();
testItem.mypk = UUID.randomUUID().toString();
// Serialized size of the document; the create request payload must match it.
int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
// Create: request carries the item; response carries the created document.
CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
try {
// Same id again must conflict (409): request still carries the item,
// expected response payload is 0.
container.createItem(testItem);
fail("expected to fail due to 409");
} catch (CosmosException e) {
validate(e.getDiagnostics(), testItemLength, 0);
}
// Read: no request payload; response carries the document.
CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
// Delete: neither request nor response payload.
CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
validate(deleteItemResponse.getDiagnostics(), 0, 0);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    // Timestamps bracket three upsert operations (separated by 1 s pauses) so
    // validateRntbdStatistics can bound the endpoint's createdTime and
    // lastRequestTime against real wall-clock windows.
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();
        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        Thread.sleep(1000);
        // Operation 1: first request initializes the RNTBD service endpoint.
        // (The original bound this and operation 2 to unused locals; dropped.)
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();
        Thread.sleep(1000);
        // Operation 2: establishes the lastRequestTime window checked below.
        Instant beforeOperation2 = Instant.now();
        container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();
        Thread.sleep(1000);
        // Operation 3: its diagnostics are the ones actually validated.
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();
        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3);
        // Payload-length sanity checks on a read and a delete.
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}
/**
 * Validates the RNTBD service-endpoint statistics embedded in the given diagnostics:
 * channel/pending counters and that createdTime / lastRequestTime /
 * lastSuccessfulRequestTime fall inside the supplied wall-clock windows
 * (with small tolerances for clock granularity).
 */
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
    Instant clientInitializationTime,
    Instant beforeInitializingRntbdServiceEndpoint,
    Instant afterInitializingRntbdServiceEndpoint,
    Instant beforeOperation2,
    Instant afterOperation2,
    Instant beforeOperation3,
    Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    // Fix: removed the unused local 'hasPayload' the original computed here.
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
    // Endpoint creation must fall within the first operation's window
    // (1 ms / 2 ms slack for timestamp rounding).
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    // Last (successful) request time must fall within operation 2's window.
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
}
/**
 * Validates request/response payload lengths recorded in the diagnostics.
 * The RNTBD wire lengths include protocol headers, so they must strictly
 * exceed the corresponding payload sizes.
 */
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    // Fix: assert non-null BEFORE dereferencing; the original read 'exception'
    // first, which would NPE instead of producing a useful assertion failure.
    assertThat(storeResult).isNotNull();
    boolean hasPayload = storeResult.get("exception").isNull();
    // Fix: the original asserted rntbdRequestLengthInBytes twice; deduplicated.
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    // Response payload is only meaningful when the store call did not fail.
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
// Verifies addressResolutionStatistics in diagnostics: a healthy resolution
// reports a null errorMessage, while a resolution that initially fails
// (simulated via an unreachable proxy) surfaces the connection error in the
// diagnostics even after a later retry succeeds.
public void addressResolutionStatistics() {
CosmosClient client1 = null;
CosmosClient client2 = null;
String databaseId = DatabaseForTest.generateId();
String containerId = UUID.randomUUID().toString();
CosmosDatabase cosmosDatabase = null;
CosmosContainer cosmosContainer = null;
try {
// Client 1: normal path — address resolution succeeds, errorMessage is null.
client1 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
client1.createDatabase(databaseId);
cosmosDatabase = client1.getDatabase(databaseId);
cosmosDatabase.createContainer(containerId, "/mypk");
InternalObjectNode internalObjectNode = getInternalObjectNode();
cosmosContainer = cosmosDatabase.getContainer(containerId);
CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" +
".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information");
// Client 2: a fresh client whose address cache is empty, so the first read
// must resolve addresses again.
client2 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosDatabase = client2.getDatabase(databaseId);
cosmosContainer = cosmosDatabase.getContainer(containerId);
// Reach into client2's GatewayAddressCache via reflection and swap its HTTP
// client for one pointed at a non-listening local proxy (httpClient(true)),
// forcing address resolution to fail with "Connection refused".
AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient,
"addressResolver", true);
@SuppressWarnings("rawtypes")
Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver,
"addressCacheByEndpoint",
true);
Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
HttpClient httpClient = httpClient(true);
FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
// Background thread restores a working HTTP client after 5 s so the SDK's
// retries eventually succeed; the earlier failure must still be recorded.
new Thread(() -> {
try {
Thread.sleep(5000);
HttpClient httpClient1 = httpClient(false);
FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
} catch (Exception e) {
fail(e.getMessage());
}
}).start();
PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
CosmosItemResponse<InternalObjectNode> readResourceResponse =
cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
InternalObjectNode.class);
// The read succeeded (after recovery) yet diagnostics keep both the failed
// and the successful resolution attempts.
assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" +
".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
} catch (Exception ex) {
logger.error("Error in test addressResolutionStatistics", ex);
fail("This test should not throw exception " + ex);
} finally {
safeDeleteSyncDatabase(cosmosDatabase);
if (client1 != null) {
client1.close();
}
if (client2 != null) {
client2.close();
}
}
}
/** Builds a test document whose id and partition key ("mypk") share one random UUID. */
private InternalObjectNode getInternalObjectNode() {
    final String documentId = UUID.randomUUID().toString();
    final InternalObjectNode node = new InternalObjectNode();
    node.setId(documentId);
    BridgeInternal.setProperty(node, "mypk", documentId);
    return node;
}
/** Builds a test document with a random id and the given partition-key value. */
private InternalObjectNode getInternalObjectNode(String pkValue) {
    final InternalObjectNode node = new InternalObjectNode();
    node.setId(UUID.randomUUID().toString());
    BridgeInternal.setProperty(node, "mypk", pkValue);
    return node;
}
/** Reads the private supplementalResponseStatisticsList field via reflection. */
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    final Field listField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    listField.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    final List<ClientSideRequestStatistics.StoreResponseStatistics> statistics =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) listField.get(requestStatistics);
    return statistics;
}
/** Replaces the private supplementalResponseStatisticsList with an empty list via reflection. */
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    final Field listField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    listField.setAccessible(true);
    listField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}
/** Asserts the gateway-mode request timeline contains every expected event name. */
private void validateTransportRequestTimelineGateway(String diagnostics) {
    // Fix: the original asserted "connectionConfigured" twice; deduplicated.
    // NOTE(review): if a different event (e.g. "created") was intended by the
    // second line, add it explicitly.
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}
/** Asserts the direct-mode request timeline contains every expected event and timing field. */
private void validateTransportRequestTimelineDirect(String diagnostics) {
    // Same membership checks as before, expressed as a loop over the event names.
    final String[] expectedEvents = {
        "created", "queued", "channelAcquisitionStarted", "pipelined",
        "transitTime", "received", "completed"
    };
    for (String eventName : expectedEvents) {
        assertThat(diagnostics).contains("\"eventName\":\"" + eventName + "\"");
    }
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMicroSec\"");
}
/**
 * Fails the test if {@code json} is not well-formed JSON: streams every token
 * through a Jackson parser and treats any {@link IOException} as malformed input.
 */
public void isValidJSON(final String json) {
    // Fix: JsonParser is Closeable and the original leaked it; try-with-resources
    // guarantees the parser (and its buffers) are released.
    try (JsonParser parser = new ObjectMapper().createParser(json)) {
        while (parser.nextToken() != null) {
            // Consume all tokens; structural errors surface as IOException subtypes.
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}
/**
 * Builds an HTTP client for the address cache: with {@code fakeProxy} the
 * client is routed through localhost:8888 (expected to be unreachable in the
 * test), otherwise a plain default-configured client is returned.
 */
private HttpClient httpClient(boolean fakeProxy) {
    final HttpClientConfig config = fakeProxy
        ? new HttpClientConfig(new Configs())
            .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)))
        : new HttpClientConfig(new Configs());
    return HttpClient.createFixed(config);
}
/**
 * Deserializes an {@link IndexUtilizationInfo} from a JSON string.
 * Returns {@code null} (after logging) when the input cannot be parsed.
 */
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    try {
        return new ObjectMapper().readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
        return null;
    }
}
// Verifies that the diagnostics report exactly one contacted region and that it
// matches the client's first available write region (read via reflection from
// the client's LocationCache).
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
RxDocumentClientImpl rxDocumentClient =
(RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
// LocationCache.locationInfo is private and of a private nested type, so both
// the field and the type are accessed reflectively.
Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
locationInfoField.setAccessible(true);
Object locationInfo = locationInfoField.get(locationCache);
Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
".LocationCache$DatabaseAccountLocationsInfo");
Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField(
"availableWriteEndpointByLocation");
availableWriteEndpointByLocation.setAccessible(true);
@SuppressWarnings("unchecked")
Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
// First key = first available write region; diagnostics report region names
// in lower case, hence the toLowerCase() comparison.
String regionName = map.keySet().iterator().next();
assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}
/**
 * Minimal POJO used as the test document. Public fields are serialized by
 * Jackson; "mypk" is the container's partition-key property in these tests.
 */
public static class TestItem {
// Document id.
public String id;
// Partition-key value.
public String mypk;
public TestItem() {
}
}
} |
Exactly, so if the prefetch is enabled and drain couldn't emit downstream, we break from the while-loop in line 216; that way, we don't consider the message as consumed, hence increment won't happen. | private void drainQueue() {
// No draining once the subscriber has been cancelled or disposed.
if (isTerminated()) {
return;
}
// Snapshot of outstanding downstream demand and buffered-message state.
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
// Keep delivering while there is demand and buffered messages remain.
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
// Inner loop: hand up to 'numberRequested' messages to the current work item.
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
// Retry emission across work items: when the current work item rejects the
// message (e.g. it is terminal), getOrUpdateCurrentWork() advances to the
// next queued work item; null means no downstream is available at all.
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
// Prefetch is off: the message was fetched for a receiver that is gone,
// so release it back to the service (fire-and-forget with logging).
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
// Prefetch is on: keep the message — push it back to the head of the
// buffer and stop. Breaking BEFORE the increment below means it is NOT
// counted as consumed, so REQUESTED is not decremented for it.
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
// Reduce recorded demand by what was actually consumed, unless demand is
// effectively unbounded (Long.MAX_VALUE).
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | numberConsumed++; | private void drainQueue() {
// No draining once the subscriber has been cancelled or disposed.
if (isTerminated()) {
return;
}
// Snapshot of outstanding downstream demand and buffered-message state.
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
// Keep delivering while there is demand and buffered messages remain.
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
// Inner loop: hand up to 'numberRequested' messages to the current work item.
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
// Retry emission across work items: when the current work item rejects the
// message (e.g. it is terminal), getOrUpdateCurrentWork() advances to the
// next queued work item; null means no downstream is available at all.
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
// Prefetch is off: the message was fetched for a receiver that is gone,
// so release it back to the service (fire-and-forget with logging).
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
// Prefetch is on: keep the message — push it back to the head of the
// buffer and stop. Breaking BEFORE the increment below means it is NOT
// counted as consumed, so REQUESTED is not decremented for it.
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
// Reduce recorded demand by what was actually consumed, unless demand is
// effectively unbounded (Long.MAX_VALUE).
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Work-in-progress counter guarding the drain loop (see drain()).
private final AtomicInteger wip = new AtomicInteger();
// Pending receive-work items, served in FIFO order.
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
// Messages received from upstream, awaiting delivery to a work item. A deque
// so an undeliverable message can be pushed back to the head (addFirst).
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
// Guards reads/writes of currentWork.
private final Object currentWorkLock = new Object();
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
private volatile SynchronousReceiveWork currentWork;
/**
* The number of requested messages.
*/
private volatile long requested;
// Lock-free updater for 'requested'.
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
private volatile Subscription upstream;
// Lock-free updater for 'upstream'; also used to mark cancellation.
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
* between the last terminated downstream and the next active downstream.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
    SynchronousReceiveWork initialWork,
    boolean isPrefetchDisabled,
    Duration operationTimeout) {
    this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
    Objects.requireNonNull(initialWork, "'initialWork' cannot be null.");
    this.isPrefetchDisabled = isPrefetchDisabled;
    // Fix: validate BEFORE mutating state. The original added the work to
    // workQueue first, leaving an invalid item enqueued when this check threw.
    if (initialWork.getNumberOfEvents() < 1) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
    }
    this.workQueue.add(initialWork);
    // Record the initial demand; hookOnSubscribe requests this from upstream.
    Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
// Accept only the first upstream subscription; reject and ignore later ones.
if (!Operators.setOnce(UPSTREAM, this, subscription)) {
logger.warning("This should only be subscribed to once. Ignoring subscription.");
return;
}
// Promote the first queued work item (which updates REQUESTED as a side
// effect), then request that many messages from upstream.
getOrUpdateCurrentWork();
subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
    // A terminated subscriber drops the message through the standard Reactor hook.
    if (isTerminated()) {
        Operators.onNextDropped(message, Context.empty());
        return;
    }
    // Buffer the message and let the drain loop deliver it downstream.
    bufferMessages.add(message);
    drain();
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
// If this item landed at the head of the queue it may become the current work
// immediately; getOrUpdateCurrentWork also requests upstream credits if needed.
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
// Only drain when upstream is already subscribed; before that there is
// nothing to deliver (hookOnSubscribe issues the first request).
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
// Standard work-in-progress guard: only one thread runs the drain loop at a
// time. Concurrent callers just bump 'wip' and return; the active thread
// observes the leftover count and re-runs drainQueue() on their behalf.
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
// Subtract the passes we've covered; a non-zero result means more
// work arrived while draining, so loop again.
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
// Propagate the upstream error to all pending work items and tear down.
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
// Cancellation completes all pending work with the default completion message.
this.dispose();
}
/** Returns true when upstream was cancelled or this subscriber was disposed. */
private boolean isTerminated() {
    return UPSTREAM.get(this) == Operators.cancelledSubscription() || isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return Gets or sets the next work item. Null if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
synchronized (currentWorkLock) {
// Fast path: the current work item can still accept messages.
if (currentWork != null && !currentWork.isTerminal()) {
return currentWork;
}
currentWork = workQueue.poll();
while (currentWork != null) {
if (currentWork.isTerminal()) {
// A work item that terminated before being served still accounts for
// REQUESTED credits; subtract its remaining events, flooring at zero.
REQUESTED.updateAndGet(this, currentRequest -> {
final int remainingEvents = currentWork.getRemainingEvents();
if (remainingEvents < 1) {
return currentRequest;
}
final long difference = currentRequest - remainingEvents;
logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
+ " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
difference);
return difference < 0 ? 0 : difference;
});
// Skip the terminal item and try the next queued one.
currentWork = workQueue.poll();
continue;
}
// Found a live work item: start it and make sure enough messages are
// requested upstream to satisfy its demand.
final SynchronousReceiveWork work = currentWork;
logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
work.getNumberOfEvents());
work.start();
requestUpstream(work.getNumberOfEvents());
return work;
}
// Queue exhausted; currentWork is null here.
return currentWork;
}
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
if (isTerminated()) {
logger.info("Cannot request more messages upstream. Subscriber is terminated.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.info("There is no upstream to request messages from.");
return;
}
// Only request the shortfall between the downstream need and what is already
// outstanding; an existing larger request is never reduced.
final long currentRequested = REQUESTED.get(this);
final long difference = numberOfMessages - currentRequested;
logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
currentRequested, numberOfMessages, difference);
if (difference <= 0) {
return;
}
// addCap saturates at Long.MAX_VALUE instead of overflowing.
Operators.addCap(REQUESTED, this, difference);
subscription.request(difference);
}
@Override
public void dispose() {
super.dispose();
// Complete any outstanding work items before shutting down.
dispose("Upstream completed the receive work.", null);
}
/**
 * Terminates this subscriber once: completes the current work item and every
 * queued work item with the given message (and optional error).
 */
private void dispose(String message, Throwable throwable) {
    super.dispose();
    // getAndSet makes the tear-down idempotent; only the first caller proceeds.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    synchronized (currentWorkLock) {
        if (currentWork != null) {
            currentWork.complete(message, throwable);
            currentWork = null;
        }
        // Drain and complete everything still pending in the work queue.
        for (SynchronousReceiveWork pending = workQueue.poll(); pending != null; pending = workQueue.poll()) {
            pending.complete(message, throwable);
        }
    }
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
    // Exposed (package-private) so tests can observe the pending work count.
    return workQueue.size();
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
// Set exactly once in dispose(String, Throwable); guards against double-disposal.
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Work-in-progress counter for the serialized drain loop: only the thread that
// increments 0 -> 1 in drain() runs drainQueue(); others just record a "missed" pass.
private final AtomicInteger wip = new AtomicInteger();
// Pending synchronous receive work items, consumed in FIFO order by getOrUpdateCurrentWork().
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
// Messages received from upstream (hookOnNext) but not yet emitted to a work item.
// A deque so an unemitted message can be pushed back to the front.
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
// Guards currentWork and the transfer of items out of workQueue.
private final Object currentWorkLock = new Object();
// Used to release messages that could not be handed to any work item.
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
// NOTE(review): stored at construction but not referenced by the methods visible
// here — confirm intended use or remove.
private final Duration operationTimeout;
// The work item currently being satisfied; access is synchronized on currentWorkLock.
private volatile SynchronousReceiveWork currentWork;
/**
 * The number of requested messages.
 */
private volatile long requested;
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
// Upstream subscription; set once in hookOnSubscribe via Operators.setOnce.
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, messages received in the timespan between the last
* terminated downstream and the next active downstream are released back to the service.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
    SynchronousReceiveWork initialWork,
    boolean isPrefetchDisabled,
    Duration operationTimeout) {
    this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
    Objects.requireNonNull(initialWork, "'initialWork' cannot be null.");
    // Validate before mutating any state. The previous order added initialWork to
    // workQueue first, so a failed validation left an invalid work item queued.
    if (initialWork.getNumberOfEvents() < 1) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
    }
    this.isPrefetchDisabled = isPrefetchDisabled;
    this.workQueue.add(initialWork);
    // Record the initial demand; addCap caps the counter at Long.MAX_VALUE on overflow.
    Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
    // Atomically install the upstream subscription; only the first caller wins.
    final boolean isFirstSubscription = Operators.setOnce(UPSTREAM, this, subscription);
    if (!isFirstSubscription) {
        logger.warning("This should only be subscribed to once. Ignoring subscription.");
        return;
    }
    // Activate the first queued work item, then request the credits accumulated so far.
    getOrUpdateCurrentWork();
    subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
    if (!isTerminated()) {
        // Buffer the message and let the drain loop hand it to the current work item.
        bufferMessages.add(message);
        drain();
        return;
    }
    // Subscriber is already terminated; signal the dropped message to Reactor hooks.
    Operators.onNextDropped(message, Context.empty());
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
    Objects.requireNonNull(work, "'work' cannot be null");
    workQueue.add(work);
    // If the queue was empty, the new item is at its head and should start immediately.
    final boolean isFirstInQueue = workQueue.peek() == work;
    if (isFirstInQueue) {
        logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
            + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
        getOrUpdateCurrentWork();
    } else {
        logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
            work.getNumberOfEvents(), work.getTimeout());
    }
    // Only drain once an upstream subscription exists; otherwise there is nothing to pull from.
    if (UPSTREAM.get(this) != null) {
        drain();
    }
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
    // Standard work-in-progress gate: only the caller that bumps the counter
    // from 0 enters the loop; concurrent callers just record a missed pass.
    if (wip.getAndIncrement() != 0) {
        return;
    }
    int missed = 1;
    do {
        try {
            drainQueue();
        } finally {
            // Consume the passes observed so far; loop again if more arrived meanwhile.
            missed = wip.addAndGet(-missed);
        }
    } while (missed != 0);
}
/*
 * NOTE: This comment describes drainQueue(), which is defined elsewhere in this file;
 * it is not the Javadoc for the method that follows.
 */
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
    // Terminal: completes the current and all queued work items with the upstream error.
    dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
    // Cancellation is terminal: tear down current and pending work.
    dispose();
}
private boolean isTerminated() {
    // Terminated when the upstream subscription was cancelled or this subscriber was disposed.
    return UPSTREAM.get(this) == Operators.cancelledSubscription() || isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return The current work item, or the next non-terminal item from the queue; {@code null} if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
    synchronized (currentWorkLock) {
        // Fast path: the in-flight work item is still accepting messages.
        if (currentWork != null && !currentWork.isTerminal()) {
            return currentWork;
        }
        currentWork = workQueue.poll();
        while (currentWork != null) {
            if (currentWork.isTerminal()) {
                // The work item became terminal before being serviced. Deduct its
                // remaining (now unneeded) credits from REQUESTED, clamping at zero.
                REQUESTED.updateAndGet(this, currentRequest -> {
                    final int remainingEvents = currentWork.getRemainingEvents();
                    if (remainingEvents < 1) {
                        return currentRequest;
                    }
                    final long difference = currentRequest - remainingEvents;
                    logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
                        + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
                        difference);
                    return difference < 0 ? 0 : difference;
                });
                // Skip the dead item and examine the next one.
                currentWork = workQueue.poll();
                continue;
            }
            // Found a live work item: start it (presumably begins its timeout
            // window — confirm in SynchronousReceiveWork) and top up upstream credits.
            final SynchronousReceiveWork work = currentWork;
            logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
                work.getNumberOfEvents());
            work.start();
            requestUpstream(work.getNumberOfEvents());
            return work;
        }
        // Queue exhausted; currentWork is null.
        return currentWork;
    }
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
    if (isTerminated()) {
        logger.info("Cannot request more messages upstream. Subscriber is terminated.");
        return;
    }
    final Subscription subscription = UPSTREAM.get(this);
    if (subscription == null) {
        logger.info("There is no upstream to request messages from.");
        return;
    }
    // Only top up the shortfall; credits already outstanding cover the rest.
    final long currentRequested = REQUESTED.get(this);
    final long difference = numberOfMessages - currentRequested;
    logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
        currentRequested, numberOfMessages, difference);
    if (difference <= 0) {
        return;
    }
    // NOTE(review): the read-then-addCap pair is not atomic; concurrent callers could
    // both observe a stale REQUESTED and over-request. Looks safe today because the
    // only caller (getOrUpdateCurrentWork) holds currentWorkLock — confirm.
    Operators.addCap(REQUESTED, this, difference);
    subscription.request(difference);
}
@Override
public void dispose() {
    // Delegate to the terminal dispose. dispose(String, Throwable) invokes
    // super.dispose() itself as its first statement, so the extra super.dispose()
    // call previously made here was redundant (BaseSubscriber.dispose is idempotent).
    dispose("Upstream completed the receive work.", null);
}
private void dispose(String message, Throwable throwable) {
    super.dispose();
    // First caller wins; subsequent invocations are no-ops.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    synchronized (currentWorkLock) {
        if (currentWork != null) {
            currentWork.complete(message, throwable);
            currentWork = null;
        }
        // Complete every queued work item with the same terminal signal.
        SynchronousReceiveWork pending;
        while ((pending = workQueue.poll()) != null) {
            pending.complete(message, throwable);
        }
    }
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
    // Package-private hook for tests to observe how many work items remain queued.
    return workQueue.size();
}
} |
Do we recommend using `logger.atWarning().addKeyValue("lockToken", <>)`? | private void drainQueue() {
if (isTerminated()) {
return;
}
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | error -> logger.warning("lockToken[{}] Couldn't release the message.", | private void drainQueue() {
if (isTerminated()) {
return;
}
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicInteger wip = new AtomicInteger();
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
private final Object currentWorkLock = new Object();
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
private volatile SynchronousReceiveWork currentWork;
/**
* The number of requested messages.
*/
private volatile long requested;
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
* between the last terminated downstream and the next active downstream.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
SynchronousReceiveWork initialWork,
boolean isPrefetchDisabled,
Duration operationTimeout) {
this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
this.isPrefetchDisabled = isPrefetchDisabled;
if (initialWork.getNumberOfEvents() < 1) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
}
Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
if (!Operators.setOnce(UPSTREAM, this, subscription)) {
logger.warning("This should only be subscribed to once. Ignoring subscription.");
return;
}
getOrUpdateCurrentWork();
subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
if (isTerminated()) {
Operators.onNextDropped(message, Context.empty());
} else {
bufferMessages.add(message);
drain();
}
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
this.dispose();
}
private boolean isTerminated() {
if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
return true;
}
return isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return Gets or sets the next work item. Null if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
synchronized (currentWorkLock) {
if (currentWork != null && !currentWork.isTerminal()) {
return currentWork;
}
currentWork = workQueue.poll();
while (currentWork != null) {
if (currentWork.isTerminal()) {
REQUESTED.updateAndGet(this, currentRequest -> {
final int remainingEvents = currentWork.getRemainingEvents();
if (remainingEvents < 1) {
return currentRequest;
}
final long difference = currentRequest - remainingEvents;
logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
+ " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
difference);
return difference < 0 ? 0 : difference;
});
currentWork = workQueue.poll();
continue;
}
final SynchronousReceiveWork work = currentWork;
logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
work.getNumberOfEvents());
work.start();
requestUpstream(work.getNumberOfEvents());
return work;
}
return currentWork;
}
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
if (isTerminated()) {
logger.info("Cannot request more messages upstream. Subscriber is terminated.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.info("There is no upstream to request messages from.");
return;
}
final long currentRequested = REQUESTED.get(this);
final long difference = numberOfMessages - currentRequested;
logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
currentRequested, numberOfMessages, difference);
if (difference <= 0) {
return;
}
Operators.addCap(REQUESTED, this, difference);
subscription.request(difference);
}
@Override
public void dispose() {
super.dispose();
dispose("Upstream completed the receive work.", null);
}
private void dispose(String message, Throwable throwable) {
super.dispose();
if (isDisposed.getAndSet(true)) {
return;
}
synchronized (currentWorkLock) {
if (currentWork != null) {
currentWork.complete(message, throwable);
currentWork = null;
}
SynchronousReceiveWork w = workQueue.poll();
while (w != null) {
w.complete(message, throwable);
w = workQueue.poll();
}
}
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
return this.workQueue.size();
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicInteger wip = new AtomicInteger();
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
private final Object currentWorkLock = new Object();
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
private volatile SynchronousReceiveWork currentWork;
/**
* The number of requested messages.
*/
private volatile long requested;
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
* between the last terminated downstream and the next active downstream.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
SynchronousReceiveWork initialWork,
boolean isPrefetchDisabled,
Duration operationTimeout) {
this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
this.isPrefetchDisabled = isPrefetchDisabled;
if (initialWork.getNumberOfEvents() < 1) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
}
Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
if (!Operators.setOnce(UPSTREAM, this, subscription)) {
logger.warning("This should only be subscribed to once. Ignoring subscription.");
return;
}
getOrUpdateCurrentWork();
subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
if (isTerminated()) {
Operators.onNextDropped(message, Context.empty());
} else {
bufferMessages.add(message);
drain();
}
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
this.dispose();
}
private boolean isTerminated() {
if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
return true;
}
return isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return Gets or sets the next work item. Null if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
synchronized (currentWorkLock) {
if (currentWork != null && !currentWork.isTerminal()) {
return currentWork;
}
currentWork = workQueue.poll();
while (currentWork != null) {
if (currentWork.isTerminal()) {
REQUESTED.updateAndGet(this, currentRequest -> {
final int remainingEvents = currentWork.getRemainingEvents();
if (remainingEvents < 1) {
return currentRequest;
}
final long difference = currentRequest - remainingEvents;
logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
+ " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
difference);
return difference < 0 ? 0 : difference;
});
currentWork = workQueue.poll();
continue;
}
final SynchronousReceiveWork work = currentWork;
logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
work.getNumberOfEvents());
work.start();
requestUpstream(work.getNumberOfEvents());
return work;
}
return currentWork;
}
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
if (isTerminated()) {
logger.info("Cannot request more messages upstream. Subscriber is terminated.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.info("There is no upstream to request messages from.");
return;
}
final long currentRequested = REQUESTED.get(this);
final long difference = numberOfMessages - currentRequested;
logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
currentRequested, numberOfMessages, difference);
if (difference <= 0) {
return;
}
Operators.addCap(REQUESTED, this, difference);
subscription.request(difference);
}
@Override
public void dispose() {
super.dispose();
dispose("Upstream completed the receive work.", null);
}
private void dispose(String message, Throwable throwable) {
super.dispose();
if (isDisposed.getAndSet(true)) {
return;
}
synchronized (currentWorkLock) {
if (currentWork != null) {
currentWork.complete(message, throwable);
currentWork = null;
}
SynchronousReceiveWork w = workQueue.poll();
while (w != null) {
w.complete(message, throwable);
w = workQueue.poll();
}
}
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
return this.workQueue.size();
}
} |
I think lock-token is something eligible to define as an attribute to lookup, but we haven't migrated the SB package to structured logging yet, so I think we should decide the standard attributes (to use as key-value pairs) when making that effort in the SB package as a whole. | private void drainQueue() {
if (isTerminated()) {
return;
}
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | error -> logger.warning("lockToken[{}] Couldn't release the message.", | private void drainQueue() {
if (isTerminated()) {
return;
}
long numberRequested = REQUESTED.get(this);
boolean isEmpty = bufferMessages.isEmpty();
SynchronousReceiveWork currentDownstream = null;
while (numberRequested != 0L && !isEmpty) {
if (isTerminated()) {
break;
}
long numberConsumed = 0L;
while (numberRequested != numberConsumed) {
if (isEmpty || isTerminated()) {
break;
}
final ServiceBusReceivedMessage message = bufferMessages.poll();
boolean isEmitted = false;
while (!isEmitted) {
currentDownstream = getOrUpdateCurrentWork();
if (currentDownstream == null) {
break;
}
isEmitted = currentDownstream.emitNext(message);
}
if (!isEmitted) {
if (isPrefetchDisabled) {
asyncClient.release(message).subscribe(__ -> { },
error -> logger.warning("lockToken[{}] Couldn't release the message.",
message.getLockToken(), error),
() -> logger.verbose("lockToken[{}] Message successfully released.",
message.getLockToken()));
} else {
bufferMessages.addFirst(message);
break;
}
}
numberConsumed++;
isEmpty = bufferMessages.isEmpty();
}
final long requestedMessages = REQUESTED.get(this);
if (requestedMessages != Long.MAX_VALUE) {
numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
}
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicInteger wip = new AtomicInteger();
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
private final Object currentWorkLock = new Object();
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
private volatile SynchronousReceiveWork currentWork;
/**
* The number of requested messages.
*/
private volatile long requested;
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
"upstream");
/**
* Creates a synchronous subscriber with some initial work to queue.
*
*
* @param asyncClient Client to update disposition of messages.
* @param isPrefetchDisabled Indicates if the prefetch is disabled.
* @param operationTimeout Timeout to wait for operation to complete.
* @param initialWork Initial work to queue.
*
* <p>
* When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
* between the last terminated downstream and the next active downstream.
* </p>
*
* @throws NullPointerException if {@code initialWork} is null.
* @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
*/
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
SynchronousReceiveWork initialWork,
boolean isPrefetchDisabled,
Duration operationTimeout) {
this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
this.isPrefetchDisabled = isPrefetchDisabled;
if (initialWork.getNumberOfEvents() < 1) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
}
Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
* On an initial subscription, will take the first work item, and request that amount of work for it.
*
* @param subscription Subscription for upstream.
*/
@Override
protected void hookOnSubscribe(Subscription subscription) {
if (!Operators.setOnce(UPSTREAM, this, subscription)) {
logger.warning("This should only be subscribed to once. Ignoring subscription.");
return;
}
getOrUpdateCurrentWork();
subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
if (isTerminated()) {
Operators.onNextDropped(message, Context.empty());
} else {
bufferMessages.add(message);
drain();
}
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
this.dispose();
}
private boolean isTerminated() {
if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
return true;
}
return isDisposed.get();
}
/**
 * Gets the current work item if it is not terminal and cleans up any existing timeout operations.
 *
 * @return Gets or sets the next work item. Null if there are no work items currently.
 */
private SynchronousReceiveWork getOrUpdateCurrentWork() {
    synchronized (currentWorkLock) {
        // Fast path: the active work item can still accept messages.
        if (currentWork != null && !currentWork.isTerminal()) {
            return currentWork;
        }
        // Pop queued items until a live one is found, returning credits held by dead ones.
        currentWork = workQueue.poll();
        while (currentWork != null) {
            if (currentWork.isTerminal()) {
                // The popped item already finished (e.g. timed out) while queued; subtract the
                // events it still expected so REQUESTED only counts credits for live work.
                REQUESTED.updateAndGet(this, currentRequest -> {
                    final int remainingEvents = currentWork.getRemainingEvents();
                    if (remainingEvents < 1) {
                        return currentRequest;
                    }
                    final long difference = currentRequest - remainingEvents;
                    logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
                        + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
                        difference);
                    // Clamp at zero — the outstanding-request counter must never go negative.
                    return difference < 0 ? 0 : difference;
                });
                currentWork = workQueue.poll();
                continue;
            }
            final SynchronousReceiveWork work = currentWork;
            logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
                work.getNumberOfEvents());
            // start() presumably arms the work item's timeout window — confirm in SynchronousReceiveWork.
            work.start();
            // Make sure upstream has at least as many credits as this work item needs.
            requestUpstream(work.getNumberOfEvents());
            return work;
        }
        // Queue exhausted; currentWork is null here.
        return currentWork;
    }
}
/**
 * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
 * items.
 *
 * @param numberOfMessages Number of messages required downstream.
 */
private void requestUpstream(long numberOfMessages) {
    if (isTerminated()) {
        logger.info("Cannot request more messages upstream. Subscriber is terminated.");
        return;
    }
    final Subscription upstreamSubscription = UPSTREAM.get(this);
    if (upstreamSubscription == null) {
        logger.info("There is no upstream to request messages from.");
        return;
    }
    final long outstanding = REQUESTED.get(this);
    final long creditsToAdd = numberOfMessages - outstanding;
    logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
        outstanding, numberOfMessages, creditsToAdd);
    // Already have enough credits outstanding to satisfy this work item.
    if (creditsToAdd <= 0) {
        return;
    }
    // addCap saturates at Long.MAX_VALUE instead of overflowing.
    Operators.addCap(REQUESTED, this, creditsToAdd);
    upstreamSubscription.request(creditsToAdd);
}
/**
 * Disposes of this subscriber when the upstream completes, completing any outstanding work items.
 */
@Override
public void dispose() {
    super.dispose();
    dispose("Upstream completed the receive work.", null);
}
/**
 * Completes the active work item and every queued one with the given terminal message/error,
 * then marks this subscriber disposed. Safe to call more than once.
 *
 * @param message Terminal message handed to each work item.
 * @param throwable Error that caused disposal, or {@code null} on normal completion.
 */
private void dispose(String message, Throwable throwable) {
    super.dispose();
    if (isDisposed.getAndSet(true)) {
        // Work items were already completed by the first disposal.
        return;
    }
    synchronized (currentWorkLock) {
        if (currentWork != null) {
            currentWork.complete(message, throwable);
            currentWork = null;
        }
        for (SynchronousReceiveWork pending = workQueue.poll(); pending != null; pending = workQueue.poll()) {
            pending.complete(message, throwable);
        }
    }
}
/**
 * Exposes the number of queued work items (package-private, for tests/diagnostics).
 *
 * @return The current number of items in the queue.
 */
int getWorkQueueSize() {
    return workQueue.size();
}
} | class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class);
// Ensures the terminal cleanup in dispose(String, Throwable) runs at most once.
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Work-in-progress guard for drain(): only the thread that moves it 0 -> 1 runs the drain loop.
private final AtomicInteger wip = new AtomicInteger();
// Pending receive requests, serviced in FIFO order.
private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
// Messages received from upstream that have not yet been handed to a work item.
private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
// Serialises access to currentWork.
private final Object currentWorkLock = new Object();
// NOTE(review): asyncClient/isPrefetchDisabled/operationTimeout are assigned in the constructor but
// not read in the methods visible here — presumably used by drainQueue(); confirm against full class.
private final ServiceBusReceiverAsyncClient asyncClient;
private final boolean isPrefetchDisabled;
private final Duration operationTimeout;
// The work item currently being fed messages; read/written under currentWorkLock.
private volatile SynchronousReceiveWork currentWork;
/**
 * The number of requested messages.
 */
private volatile long requested;
// Atomic view over 'requested' (field updater avoids a separate AtomicLong allocation).
private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
    AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");
// Upstream subscription; set exactly once in hookOnSubscribe via Operators.setOnce.
private volatile Subscription upstream;
private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
    AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class,
        "upstream");
/**
 * Creates a synchronous subscriber with some initial work to queue.
 *
 * <p>
 * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
 * between the last terminated downstream and the next active downstream.
 * </p>
 *
 * @param asyncClient Client to update disposition of messages.
 * @param initialWork Initial work to queue.
 * @param isPrefetchDisabled Indicates if the prefetch is disabled.
 * @param operationTimeout Timeout to wait for operation to complete.
 *
 * @throws NullPointerException if {@code asyncClient}, {@code initialWork} or {@code operationTimeout} is null.
 * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
 */
SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient,
    SynchronousReceiveWork initialWork,
    boolean isPrefetchDisabled,
    Duration operationTimeout) {
    this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
    Objects.requireNonNull(initialWork, "'initialWork' cannot be null.");
    // Validate BEFORE enqueueing: the original added the work to workQueue first, leaving an
    // invalid item queued when this throws.
    if (initialWork.getNumberOfEvents() < 1) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
    }
    this.isPrefetchDisabled = isPrefetchDisabled;
    this.workQueue.add(initialWork);
    // Record the credits the initial work will need; requested upstream in hookOnSubscribe.
    Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
}
/**
 * On an initial subscription, will take the first work item, and request that amount of work for it.
 *
 * @param subscription Subscription for upstream.
 */
@Override
protected void hookOnSubscribe(Subscription subscription) {
    // Operators.setOnce guarantees only the first subscription wins; later ones are ignored.
    if (!Operators.setOnce(UPSTREAM, this, subscription)) {
        logger.warning("This should only be subscribed to once. Ignoring subscription.");
        return;
    }
    // Promote the first queued work item, then request the credits accumulated so far
    // (seeded by the constructor via REQUESTED).
    getOrUpdateCurrentWork();
    subscription.request(REQUESTED.get(this));
}
/**
* Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
* the subscriber.
*
* @param message Event to publish.
*/
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
if (isTerminated()) {
Operators.onNextDropped(message, Context.empty());
} else {
bufferMessages.add(message);
drain();
}
}
/**
* Queue the work to be picked up by drain loop.
*
* @param work to be queued.
*/
void queueWork(SynchronousReceiveWork work) {
Objects.requireNonNull(work, "'work' cannot be null");
workQueue.add(work);
if (workQueue.peek() == work) {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if "
+ "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout());
getOrUpdateCurrentWork();
} else {
logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(),
work.getNumberOfEvents(), work.getTimeout());
}
if (UPSTREAM.get(this) != null) {
drain();
}
}
/**
* Drain the work, only one thread can be in this loop at a time.
*/
private void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
while (missed != 0) {
try {
drainQueue();
} finally {
missed = wip.addAndGet(-missed);
}
}
}
/***
* Drain the queue using a lock on current work in progress.
*/
/**
* {@inheritDoc}
*/
@Override
protected void hookOnError(Throwable throwable) {
dispose("Errors occurred upstream", throwable);
}
@Override
protected void hookOnCancel() {
this.dispose();
}
private boolean isTerminated() {
if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
return true;
}
return isDisposed.get();
}
/**
* Gets the current work item if it is not terminal and cleans up any existing timeout operations.
*
* @return Gets or sets the next work item. Null if there are no work items currently.
*/
private SynchronousReceiveWork getOrUpdateCurrentWork() {
synchronized (currentWorkLock) {
if (currentWork != null && !currentWork.isTerminal()) {
return currentWork;
}
currentWork = workQueue.poll();
while (currentWork != null) {
if (currentWork.isTerminal()) {
REQUESTED.updateAndGet(this, currentRequest -> {
final int remainingEvents = currentWork.getRemainingEvents();
if (remainingEvents < 1) {
return currentRequest;
}
final long difference = currentRequest - remainingEvents;
logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]"
+ " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents,
difference);
return difference < 0 ? 0 : difference;
});
currentWork = workQueue.poll();
continue;
}
final SynchronousReceiveWork work = currentWork;
logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(),
work.getNumberOfEvents());
work.start();
requestUpstream(work.getNumberOfEvents());
return work;
}
return currentWork;
}
}
/**
* Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
* items.
*
* @param numberOfMessages Number of messages required downstream.
*/
private void requestUpstream(long numberOfMessages) {
if (isTerminated()) {
logger.info("Cannot request more messages upstream. Subscriber is terminated.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.info("There is no upstream to request messages from.");
return;
}
final long currentRequested = REQUESTED.get(this);
final long difference = numberOfMessages - currentRequested;
logger.verbose("Requesting messages from upstream. currentRequested[{}] numberOfMessages[{}] difference[{}]",
currentRequested, numberOfMessages, difference);
if (difference <= 0) {
return;
}
Operators.addCap(REQUESTED, this, difference);
subscription.request(difference);
}
@Override
public void dispose() {
super.dispose();
dispose("Upstream completed the receive work.", null);
}
private void dispose(String message, Throwable throwable) {
super.dispose();
if (isDisposed.getAndSet(true)) {
return;
}
synchronized (currentWorkLock) {
if (currentWork != null) {
currentWork.complete(message, throwable);
currentWork = null;
}
SynchronousReceiveWork w = workQueue.poll();
while (w != null) {
w.complete(message, throwable);
w = workQueue.poll();
}
}
}
/**
* package-private method to check queue size.
*
* @return The current number of items in the queue.
*/
int getWorkQueueSize() {
return this.workQueue.size();
}
} |
I don't understand these two tests. Why is IllegalStateException thrown? | public void bothRetryOptionsAndRetryPolicySetSync() {
assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
.retryPolicy(new RetryPolicy())
.buildClient());
} | assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) | public void bothRetryOptionsAndRetryPolicySetSync() {
assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
.retryPolicy(new RetryPolicy())
.buildClient());
} | class PhoneNumbersClientBuilderTest {
// NOTE(review): this literal is truncated in this dump ("https: with no closing quote);
// the original constant is presumably a full https endpoint URL — restore from source control.
private static final String ENDPOINT = "https:
// Base64-encoded dummy access key used for HMAC credential construction.
private static final String ACCESSKEY = "QWNjZXNzS2V5";
// SDK name/version properties bundled with the library; used to verify the User-Agent policy.
private static final Map<String, String> PROPERTIES =
    CoreUtils.getProperties("azure-communication-phonenumbers.properties");
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
// Fresh mock/spy per test; created in setUp().
private HttpClient httpClient;
private PhoneNumbersClientBuilder clientBuilder;
@BeforeEach
void setUp() {
    // Spy (not mock) the builder so tests can intercept its factory methods while
    // keeping the real build logic.
    this.httpClient = mock(HttpClient.class);
    this.clientBuilder = Mockito.spy(new PhoneNumbersClientBuilder());
}
@AfterEach
void tearDown() {
    // Release inline-mock state held for this test instance so spies don't leak across tests.
    Mockito.framework().clearInlineMock(this);
}
// Minimal happy path: endpoint + HttpClient + access key must yield a sync client
// with the default pipeline wiring.
@Test()
public void buildClientWithHttpClientWithCredential() {
    final ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
    final PhoneNumbersClient client =
        this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildClient();
    assertNotNull(client);
    validateRequiredSettings(spyHelper);
}
@Test()
public void buildClientWithCustomPipeline() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderCustomPipeline(httpPipeline).buildClient();
assertNotNull(phoneNumberClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildClientWithLogOptions() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildClient();
assertNotNull(phoneNumberClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildClientWithConfiguration() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildClient();
assertNotNull(phoneNumberClient);
validateConfiguration(spyHelper, configuration);
}
// An explicit service version must not prevent a sync client from being built.
@Test()
public void buildClientWithServiceVersion() {
    final PhoneNumbersClient client = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
        .serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
        .buildClient();
    assertNotNull(client);
}
@Test()
public void buildClientWithOneAdditionalPolicy() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientWithMultipleAdditionalPolicies() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
// Building without an endpoint must fail fast with a NullPointerException.
@Test()
public void buildClientNoEndpoint() {
    assertThrows(NullPointerException.class, () -> this.clientBuilder.buildClient());
}
@Test()
public void buildClientNoPipelineNoCredentials() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildClient();
});
}
@Test()
public void buildAsyncClientWithHttpClientWithCredential() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildAsyncClientWithCustomPipeline() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderCustomPipeline(httpPipeline).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildAsyncClientWithLogOptions() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildAsyncClientWithConfiguration() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateConfiguration(spyHelper, configuration);
}
@Test()
public void buildAsyncClientWithServiceVersion() {
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
}
@Test()
public void buildAsyncClientWithOneAdditionalPolicy() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildAsyncClientWithMultipleAdditionalPolicies() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
// Building an async client without an endpoint must fail fast.
// Fixed: this test previously called buildClient(), so the async build path was never exercised.
@Test()
public void buildAsyncClientNoEndpointThrows() {
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.buildAsyncClient();
    });
}
// Endpoint and HttpClient alone are not enough — missing credentials must fail the async build.
// Fixed: this test previously called buildClient(), so the async build path was never exercised.
@Test()
public void buildAsyncClientNoPipelineNoCredentialsThrows() {
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildAsyncClient();
    });
}
@Test()
public void setEndpointNullThrows() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(null);
});
}
@Test()
public void addPolicyNullThrows() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.addPolicy(null);
});
}
// Configuring both retryOptions and retryPolicy is ambiguous, so the builder rejects the
// combination with an IllegalStateException at build time.
// Fixed: the annotation was duplicated (@Test twice); @Test is not repeatable, which is a
// compile error.
@Test
public void bothRetryOptionsAndRetryPolicySetAsync() {
    assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
        .retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
        .retryPolicy(new RetryPolicy())
        .buildAsyncClient());
}
/**
 * Applies the minimum viable configuration: endpoint, mocked HTTP client, and
 * HMAC access-key credential. Setter order is not significant.
 */
private PhoneNumbersClientBuilder setupBuilderWithHttpClientWithCredential(PhoneNumbersClientBuilder clientBuilder) {
    return clientBuilder
        .credential(new AzureKeyCredential(ACCESSKEY))
        .httpClient(this.httpClient)
        .endpoint(ENDPOINT);
}
/**
 * Applies the base configuration plus the given additional pipeline policies, in order.
 */
private PhoneNumbersClientBuilder setupBuilderWithPolicies(
    PhoneNumbersClientBuilder clientBuilder, List<HttpPipelinePolicy> policies) {
    final PhoneNumbersClientBuilder configured = this.setupBuilderWithHttpClientWithCredential(clientBuilder);
    policies.forEach(configured::addPolicy);
    return configured;
}
/**
 * Configures the builder with an endpoint and a caller-supplied pipeline (no credential needed —
 * the pipeline is used as-is).
 */
private PhoneNumbersClientBuilder setupBuilderCustomPipeline(HttpPipeline pipeline) {
    return this.clientBuilder.pipeline(pipeline).endpoint(ENDPOINT);
}
/**
 * Asserts the default pipeline produced by the builder: endpoint, http client, and the
 * six default policies in their expected slots, plus the User-Agent policy arguments.
 */
private void validateRequiredSettings(ClientBuilderSpyHelper spyHelper) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    spyHelper.captureHttpPipelineSettings();
    PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue();
    assertEquals(ENDPOINT, phoneNumberManagementClient.getEndpoint());
    assertEquals(this.httpClient, phoneNumberManagementClient.getHttpPipeline().getHttpClient());
    assertEquals(6, phoneNumberManagementClient.getHttpPipeline().getPolicyCount());
    // Policy index 2 is deliberately not asserted — presumably the retry policy, which the
    // spy helper does not intercept; confirm against PhoneNumbersClientBuilder's pipeline order.
    assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0));
    assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1));
    assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3));
    assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4));
    assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(5));
    assertEquals(spyHelper.defaultHttpLogOptionsRef.get(), spyHelper.httpLogOptionsArg.getValue());
    assertEquals(spyHelper.defaultHttpLogOptionsRef.get().getApplicationId(), spyHelper.uaPolicyAppIdArg.getValue());
    assertEquals(PROPERTIES.get((SDK_NAME)), spyHelper.uaPolicySdkNameArg.getValue());
    assertEquals(PROPERTIES.get((SDK_VERSION)), spyHelper.uaPolicySdkVersionArg.getValue());
    // No configuration was supplied, so the User-Agent policy must have received null.
    assertNull(spyHelper.uaPolicyConfigArg.getValue());
}
/**
 * Asserts the builder used the caller-supplied pipeline verbatim instead of constructing one.
 */
private void validateCustomPipeline(ClientBuilderSpyHelper spyHelper, HttpPipeline expectedPipeline) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    final PhoneNumberAdminClientImpl adminClient = spyHelper.phoneNumberAdminClientArg.getValue();
    assertEquals(expectedPipeline, adminClient.getHttpPipeline());
}
/**
 * Asserts the supplied log options were forwarded to the logging-policy factory.
 */
private void validateLogOptions(ClientBuilderSpyHelper spyHelper, HttpLogOptions expectedLogOptions) {
    spyHelper.captureHttpPipelineSettings();
    assertEquals(expectedLogOptions, spyHelper.httpLogOptionsArg.getValue());
}
/**
 * Asserts the supplied configuration was forwarded to the User-Agent policy factory.
 */
private void validateConfiguration(ClientBuilderSpyHelper spyHelper, Configuration expectedConfiguration) {
    spyHelper.captureHttpPipelineSettings();
    assertEquals(expectedConfiguration, spyHelper.uaPolicyConfigArg.getValue());
}
/**
 * Asserts that user-supplied policies are inserted between the default policies and the
 * logging policy, preserving their relative order.
 */
private void validateAdditionalPolicies(ClientBuilderSpyHelper spyHelper, List<HttpPipelinePolicy> policies) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue();
    // 6 default policies plus one slot per custom policy; logging is always last.
    int expectedPolicyCount = 6 + policies.size();
    int lastPolicyIndex = expectedPolicyCount - 1;
    // Custom policies start at index 5 — presumably right after the cookie policy at 4; confirm
    // against the builder's pipeline assembly order.
    int customPolicyIndex = 5;
    assertEquals(expectedPolicyCount, phoneNumberManagementClient.getHttpPipeline().getPolicyCount());
    assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0));
    assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1));
    assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3));
    assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4));
    assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(lastPolicyIndex));
    // Each custom policy occupies consecutive slots in the order it was added.
    for (HttpPipelinePolicy policy : policies) {
        assertEquals(policy, phoneNumberManagementClient.getHttpPipeline().getPolicy(customPolicyIndex));
        customPolicyIndex++;
    }
}
/**
 * Wraps the spied builder and intercepts its factory methods so tests can capture the
 * policies, log options, and admin client the builder creates during build().
 *
 * <p>Some factories delegate to the real implementation (callRealMethod) and record the
 * result; others substitute plain mocks when only identity matters.</p>
 */
private class ClientBuilderSpyHelper {
    final PhoneNumbersClientBuilder clientBuilder;
    // References populated lazily by the doAnswer hooks below when build() runs.
    final AtomicReference<HmacAuthenticationPolicy> authenticationPolicyRef = new AtomicReference<>();
    final AtomicReference<UserAgentPolicy> userAgentPolicyRef = new AtomicReference<>();
    final AtomicReference<RequestIdPolicy> requestIdPolicyRef = new AtomicReference<>();
    final AtomicReference<RetryPolicy> retryPolicyRef = new AtomicReference<>();
    final AtomicReference<CookiePolicy> cookiePolicyRef = new AtomicReference<>();
    final AtomicReference<HttpLoggingPolicy> httpLoggingPolicyRef = new AtomicReference<>();
    final AtomicReference<HttpLogOptions> defaultHttpLogOptionsRef = new AtomicReference<>();
    // Captors for arguments passed into the builder's factory methods.
    final ArgumentCaptor<PhoneNumberAdminClientImpl> phoneNumberAdminClientArg =
        ArgumentCaptor.forClass(PhoneNumberAdminClientImpl.class);
    final ArgumentCaptor<String> uaPolicyAppIdArg = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> uaPolicySdkNameArg = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> uaPolicySdkVersionArg = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<Configuration> uaPolicyConfigArg = ArgumentCaptor.forClass(Configuration.class);
    final ArgumentCaptor<HttpLogOptions> httpLogOptionsArg = ArgumentCaptor.forClass(HttpLogOptions.class);
    ClientBuilderSpyHelper(PhoneNumbersClientBuilder clientBuilder) {
        this.clientBuilder = clientBuilder;
        this.initializeSpies();
    }
    // Installs doAnswer hooks on every factory method the builder calls during build().
    private void initializeSpies() {
        // Real policy, recorded — tests compare it by identity against pipeline slots.
        Answer<HmacAuthenticationPolicy> createCommunicationClientCredentialPolicy = (invocation) -> {
            this.authenticationPolicyRef.set((HmacAuthenticationPolicy) invocation.callRealMethod());
            return this.authenticationPolicyRef.get();
        };
        doAnswer(createCommunicationClientCredentialPolicy).when(this.clientBuilder).createAuthenticationPolicy();
        // Mock substitute — only the instance identity matters for these two.
        Answer<UserAgentPolicy> createUserAgentPolicy = (invocation) -> {
            this.userAgentPolicyRef.set(mock(UserAgentPolicy.class));
            return this.userAgentPolicyRef.get();
        };
        doAnswer(createUserAgentPolicy).when(this.clientBuilder).createUserAgentPolicy(any(), any(), any(), any());
        Answer<RequestIdPolicy> createRequestIdPolicy = (invocation) -> {
            this.requestIdPolicyRef.set(mock(RequestIdPolicy.class));
            return this.requestIdPolicyRef.get();
        };
        doAnswer(createRequestIdPolicy).when(this.clientBuilder).createRequestIdPolicy();
        Answer<CookiePolicy> createCookiePolicy = (invocation) -> {
            this.cookiePolicyRef.set((CookiePolicy) invocation.callRealMethod());
            return this.cookiePolicyRef.get();
        };
        doAnswer(createCookiePolicy).when(this.clientBuilder).createCookiePolicy();
        Answer<HttpLoggingPolicy> createHttpLoggingPolicy = (invocation) -> {
            this.httpLoggingPolicyRef.set((HttpLoggingPolicy) invocation.callRealMethod());
            return this.httpLoggingPolicyRef.get();
        };
        doAnswer(createHttpLoggingPolicy).when(this.clientBuilder).createHttpLoggingPolicy(any());
        Answer<HttpLogOptions> createDefaultHttpLogOptions = (invocation) -> {
            this.defaultHttpLogOptionsRef.set((HttpLogOptions) invocation.callRealMethod());
            return this.defaultHttpLogOptionsRef.get();
        };
        doAnswer(createDefaultHttpLogOptions).when(this.clientBuilder).createDefaultHttpLogOptions();
    }
    // Verifies the admin client was built exactly once and captures it for assertions.
    void capturePhoneNumberAdminClientImpl() {
        verify(this.clientBuilder, times(1))
            .createPhoneNumberAsyncClient(this.phoneNumberAdminClientArg.capture());
    }
    // Verifies each pipeline factory ran exactly once and captures the User-Agent/logging args.
    void captureHttpPipelineSettings() {
        verify(this.clientBuilder, times(1))
            .createAuthenticationPolicy();
        verify(this.clientBuilder, times(1))
            .createUserAgentPolicy(
                this.uaPolicyAppIdArg.capture(),
                this.uaPolicySdkNameArg.capture(),
                this.uaPolicySdkVersionArg.capture(),
                this.uaPolicyConfigArg.capture());
        verify(this.clientBuilder, times(1))
            .createHttpLoggingPolicy(this.httpLogOptionsArg.capture());
    }
}
} | class PhoneNumbersClientBuilderTest {
private static final String ENDPOINT = "https:
private static final String ACCESSKEY = "QWNjZXNzS2V5";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-communication-phonenumbers.properties");
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private HttpClient httpClient;
private PhoneNumbersClientBuilder clientBuilder;
@BeforeEach
void setUp() {
this.httpClient = mock(HttpClient.class);
this.clientBuilder = Mockito.spy(new PhoneNumbersClientBuilder());
}
@AfterEach
void tearDown() {
Mockito.framework().clearInlineMock(this);
}
@Test()
public void buildClientWithHttpClientWithCredential() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildClient();
assertNotNull(phoneNumberClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildClientWithCustomPipeline() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderCustomPipeline(httpPipeline).buildClient();
assertNotNull(phoneNumberClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildClientWithLogOptions() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildClient();
assertNotNull(phoneNumberClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildClientWithConfiguration() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildClient();
assertNotNull(phoneNumberClient);
validateConfiguration(spyHelper, configuration);
}
@Test()
public void buildClientWithServiceVersion() {
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
.buildClient();
assertNotNull(phoneNumberClient);
}
@Test()
public void buildClientWithOneAdditionalPolicy() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientWithMultipleAdditionalPolicies() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientNoEndpoint() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.buildClient();
});
}
@Test()
public void buildClientNoPipelineNoCredentials() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildClient();
});
}
@Test()
public void buildAsyncClientWithHttpClientWithCredential() {
// Async build with the minimal required settings (endpoint, transport,
// credential); the default pipeline wiring is then verified via the spy.
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildAsyncClientWithCustomPipeline() {
// A caller-supplied HttpPipeline must be used verbatim by the async client.
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderCustomPipeline(httpPipeline).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildAsyncClientWithLogOptions() {
// Custom HttpLogOptions must be forwarded to createHttpLoggingPolicy.
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildAsyncClientWithConfiguration() {
// A custom Configuration must reach the user-agent policy factory.
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateConfiguration(spyHelper, configuration);
}
@Test()
public void buildAsyncClientWithServiceVersion() {
// Selecting an explicit service version must not break the build.
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
}
@Test()
public void buildAsyncClientWithOneAdditionalPolicy() {
// A single user policy should be inserted into the default pipeline.
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildAsyncClientWithMultipleAdditionalPolicies() {
// Several user policies should all be inserted, in registration order.
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildAsyncClientNoEndpointThrows() {
    // Building the ASYNC client without an endpoint must fail fast.
    // Fixed copy-paste bug: this async test previously called buildClient(),
    // so buildAsyncClient()'s validation was never exercised.
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.buildAsyncClient();
    });
}
@Test()
public void buildAsyncClientNoPipelineNoCredentialsThrows() {
    // Endpoint and transport alone are not enough for the ASYNC client either.
    // Fixed copy-paste bug: this async test previously called buildClient().
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildAsyncClient();
    });
}
@Test()
public void setEndpointNullThrows() {
// endpoint(null) must be rejected eagerly at the setter, not at build time.
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(null);
});
}
@Test()
public void addPolicyNullThrows() {
// addPolicy(null) must be rejected eagerly at the setter, not at build time.
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.addPolicy(null);
});
}
// Fixed: @Test was duplicated; @Test is not @Repeatable, so a duplicated
// annotation is a compile error.
@Test
public void bothRetryOptionsAndRetryPolicySetAsync() {
    // retryOptions and retryPolicy are mutually exclusive; the builder must
    // reject the ambiguous configuration when the async client is built.
    assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
        .retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
        .retryPolicy(new RetryPolicy())
        .buildAsyncClient());
}
// Applies the minimum configuration needed to build a client:
// endpoint, HTTP transport, and an access-key credential.
private PhoneNumbersClientBuilder setupBuilderWithHttpClientWithCredential(PhoneNumbersClientBuilder clientBuilder) {
    clientBuilder.endpoint(ENDPOINT);
    clientBuilder.httpClient(this.httpClient);
    return clientBuilder.credential(new AzureKeyCredential(ACCESSKEY));
}
// Fully configures the builder, then registers each caller-supplied
// pipeline policy on top of the defaults.
private PhoneNumbersClientBuilder setupBuilderWithPolicies(
    PhoneNumbersClientBuilder clientBuilder, List<HttpPipelinePolicy> policies) {
    PhoneNumbersClientBuilder configured = this.setupBuilderWithHttpClientWithCredential(clientBuilder);
    policies.forEach(configured::addPolicy);
    return configured;
}
// Configures the builder with an endpoint and a caller-supplied pipeline.
// No credential is set: the explicit pipeline replaces the builder-made one.
private PhoneNumbersClientBuilder setupBuilderCustomPipeline(HttpPipeline pipeline) {
    return clientBuilder.endpoint(ENDPOINT).pipeline(pipeline);
}
// Asserts the built client uses the given endpoint/transport and that the
// default pipeline contains the expected policies in their expected slots.
// NOTE(review): pipeline index 2 is deliberately not asserted — presumably
// the retry policy, which the spy helper does not capture; confirm.
private void validateRequiredSettings(ClientBuilderSpyHelper spyHelper) {
spyHelper.capturePhoneNumberAdminClientImpl();
spyHelper.captureHttpPipelineSettings();
PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue();
assertEquals(ENDPOINT, phoneNumberManagementClient.getEndpoint());
assertEquals(this.httpClient, phoneNumberManagementClient.getHttpPipeline().getHttpClient());
// Six default policies: user-agent, request-id, (unasserted #2), auth, cookie, logging.
assertEquals(6, phoneNumberManagementClient.getHttpPipeline().getPolicyCount());
assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0));
assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1));
assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3));
assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4));
assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(5));
// The defaults the builder created must be the ones handed to the factories.
assertEquals(spyHelper.defaultHttpLogOptionsRef.get(), spyHelper.httpLogOptionsArg.getValue());
assertEquals(spyHelper.defaultHttpLogOptionsRef.get().getApplicationId(), spyHelper.uaPolicyAppIdArg.getValue());
assertEquals(PROPERTIES.get((SDK_NAME)), spyHelper.uaPolicySdkNameArg.getValue());
assertEquals(PROPERTIES.get((SDK_VERSION)), spyHelper.uaPolicySdkVersionArg.getValue());
// No Configuration was supplied, so the user-agent policy received null.
assertNull(spyHelper.uaPolicyConfigArg.getValue());
}
// When a pipeline is supplied explicitly, the builder must pass it to the
// generated client implementation untouched.
private void validateCustomPipeline(ClientBuilderSpyHelper spyHelper, HttpPipeline expectedPipeline) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    HttpPipeline actualPipeline = spyHelper.phoneNumberAdminClientArg.getValue().getHttpPipeline();
    assertEquals(expectedPipeline, actualPipeline);
}
// The log options handed to the builder must reach createHttpLoggingPolicy unchanged.
private void validateLogOptions(ClientBuilderSpyHelper spyHelper, HttpLogOptions expectedLogOptions) {
    spyHelper.captureHttpPipelineSettings();
    assertEquals(expectedLogOptions, spyHelper.httpLogOptionsArg.getValue());
}
// The Configuration handed to the builder must reach the user-agent
// policy factory unchanged.
private void validateConfiguration(ClientBuilderSpyHelper spyHelper, Configuration expectedConfiguration) {
    spyHelper.captureHttpPipelineSettings();
    assertEquals(expectedConfiguration, spyHelper.uaPolicyConfigArg.getValue());
}
// Asserts that custom policies occupy consecutive slots starting at index 5
// and that the logging policy is pushed to the end of the pipeline.
private void validateAdditionalPolicies(ClientBuilderSpyHelper spyHelper, List<HttpPipelinePolicy> policies) {
spyHelper.capturePhoneNumberAdminClientImpl();
PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue();
// Six default policies plus one slot per custom policy.
int expectedPolicyCount = 6 + policies.size();
int lastPolicyIndex = expectedPolicyCount - 1;
// Custom policies are inserted after the defaults, before the logging policy.
int customPolicyIndex = 5;
assertEquals(expectedPolicyCount, phoneNumberManagementClient.getHttpPipeline().getPolicyCount());
assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0));
assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1));
// NOTE(review): index 2 is not asserted — presumably the retry policy; confirm.
assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3));
assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4));
// Logging is always last, after any custom policies.
assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(lastPolicyIndex));
for (HttpPipelinePolicy policy : policies) {
assertEquals(policy, phoneNumberManagementClient.getHttpPipeline().getPolicy(customPolicyIndex));
customPolicyIndex++;
}
}
// Wraps the spied PhoneNumbersClientBuilder and intercepts its internal
// factory methods so tests can observe the policies and options the builder
// actually creates, without building a real pipeline.
private class ClientBuilderSpyHelper {
final PhoneNumbersClientBuilder clientBuilder;
// References populated lazily by the doAnswer() hooks installed in initializeSpies().
final AtomicReference<HmacAuthenticationPolicy> authenticationPolicyRef = new AtomicReference<>();
final AtomicReference<UserAgentPolicy> userAgentPolicyRef = new AtomicReference<>();
final AtomicReference<RequestIdPolicy> requestIdPolicyRef = new AtomicReference<>();
// NOTE(review): retryPolicyRef is declared but never populated or asserted
// anywhere in this class — likely why pipeline index 2 goes unchecked.
final AtomicReference<RetryPolicy> retryPolicyRef = new AtomicReference<>();
final AtomicReference<CookiePolicy> cookiePolicyRef = new AtomicReference<>();
final AtomicReference<HttpLoggingPolicy> httpLoggingPolicyRef = new AtomicReference<>();
final AtomicReference<HttpLogOptions> defaultHttpLogOptionsRef = new AtomicReference<>();
// Captors filled by the verify(...) calls in the capture*() methods below.
final ArgumentCaptor<PhoneNumberAdminClientImpl> phoneNumberAdminClientArg =
ArgumentCaptor.forClass(PhoneNumberAdminClientImpl.class);
final ArgumentCaptor<String> uaPolicyAppIdArg = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<String> uaPolicySdkNameArg = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<String> uaPolicySdkVersionArg = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<Configuration> uaPolicyConfigArg = ArgumentCaptor.forClass(Configuration.class);
final ArgumentCaptor<HttpLogOptions> httpLogOptionsArg = ArgumentCaptor.forClass(HttpLogOptions.class);
ClientBuilderSpyHelper(PhoneNumbersClientBuilder clientBuilder) {
this.clientBuilder = clientBuilder;
this.initializeSpies();
}
// Installs doAnswer() hooks on each builder factory method. Some hooks call
// through to the real method and record its result; others substitute mocks.
private void initializeSpies() {
// Record the real HMAC authentication policy the builder creates.
Answer<HmacAuthenticationPolicy> createCommunicationClientCredentialPolicy = (invocation) -> {
this.authenticationPolicyRef.set((HmacAuthenticationPolicy) invocation.callRealMethod());
return this.authenticationPolicyRef.get();
};
doAnswer(createCommunicationClientCredentialPolicy).when(this.clientBuilder).createAuthenticationPolicy();
// Substitute a mock user-agent policy (arguments are captured separately).
Answer<UserAgentPolicy> createUserAgentPolicy = (invocation) -> {
this.userAgentPolicyRef.set(mock(UserAgentPolicy.class));
return this.userAgentPolicyRef.get();
};
doAnswer(createUserAgentPolicy).when(this.clientBuilder).createUserAgentPolicy(any(), any(), any(), any());
// Substitute a mock request-id policy.
Answer<RequestIdPolicy> createRequestIdPolicy = (invocation) -> {
this.requestIdPolicyRef.set(mock(RequestIdPolicy.class));
return this.requestIdPolicyRef.get();
};
doAnswer(createRequestIdPolicy).when(this.clientBuilder).createRequestIdPolicy();
// Record the real cookie policy.
Answer<CookiePolicy> createCookiePolicy = (invocation) -> {
this.cookiePolicyRef.set((CookiePolicy) invocation.callRealMethod());
return this.cookiePolicyRef.get();
};
doAnswer(createCookiePolicy).when(this.clientBuilder).createCookiePolicy();
// Record the real HTTP logging policy.
Answer<HttpLoggingPolicy> createHttpLoggingPolicy = (invocation) -> {
this.httpLoggingPolicyRef.set((HttpLoggingPolicy) invocation.callRealMethod());
return this.httpLoggingPolicyRef.get();
};
doAnswer(createHttpLoggingPolicy).when(this.clientBuilder).createHttpLoggingPolicy(any());
// Record the default log options so tests can compare against them.
Answer<HttpLogOptions> createDefaultHttpLogOptions = (invocation) -> {
this.defaultHttpLogOptionsRef.set((HttpLogOptions) invocation.callRealMethod());
return this.defaultHttpLogOptionsRef.get();
};
doAnswer(createDefaultHttpLogOptions).when(this.clientBuilder).createDefaultHttpLogOptions();
}
// Captures the generated client implementation passed to the async-client factory.
void capturePhoneNumberAdminClientImpl() {
verify(this.clientBuilder, times(1))
.createPhoneNumberAsyncClient(this.phoneNumberAdminClientArg.capture());
}
// Verifies each pipeline factory was invoked exactly once and captures the
// arguments handed to the user-agent and logging policy factories.
void captureHttpPipelineSettings() {
verify(this.clientBuilder, times(1))
.createAuthenticationPolicy();
verify(this.clientBuilder, times(1))
.createUserAgentPolicy(
this.uaPolicyAppIdArg.capture(),
this.uaPolicySdkNameArg.capture(),
this.uaPolicySdkVersionArg.capture(),
this.uaPolicyConfigArg.capture());
verify(this.clientBuilder, times(1))
.createHttpLoggingPolicy(this.httpLogOptionsArg.capture());
}
}
} |
Setting `builder.retryPolicy()` and `builder.retryOptions()` is mutually exclusive; that condition is evaluated when the client is built. The default pattern across SDKs is to validate overlapping settings and throw, rather than silently overwriting one with the other, so the user learns that they likely have a bug. Unfortunately this pattern isn't followed 100% across the SDK, but it is the recommended way. | public void bothRetryOptionsAndRetryPolicySetSync() {
assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
.retryPolicy(new RetryPolicy())
.buildClient());
} | assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) | public void bothRetryOptionsAndRetryPolicySetSync() {
assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
.retryPolicy(new RetryPolicy())
.buildClient());
} | class PhoneNumbersClientBuilderTest {
private static final String ENDPOINT = "https:
private static final String ACCESSKEY = "QWNjZXNzS2V5";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-communication-phonenumbers.properties");
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private HttpClient httpClient;
private PhoneNumbersClientBuilder clientBuilder;
@BeforeEach
void setUp() {
this.httpClient = mock(HttpClient.class);
this.clientBuilder = Mockito.spy(new PhoneNumbersClientBuilder());
}
@AfterEach
void tearDown() {
Mockito.framework().clearInlineMock(this);
}
@Test()
public void buildClientWithHttpClientWithCredential() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildClient();
assertNotNull(phoneNumberClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildClientWithCustomPipeline() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderCustomPipeline(httpPipeline).buildClient();
assertNotNull(phoneNumberClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildClientWithLogOptions() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildClient();
assertNotNull(phoneNumberClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildClientWithConfiguration() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildClient();
assertNotNull(phoneNumberClient);
validateConfiguration(spyHelper, configuration);
}
@Test()
public void buildClientWithServiceVersion() {
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
.buildClient();
assertNotNull(phoneNumberClient);
}
@Test()
public void buildClientWithOneAdditionalPolicy() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientWithMultipleAdditionalPolicies() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientNoEndpoint() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.buildClient();
});
}
@Test()
public void buildClientNoPipelineNoCredentials() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildClient();
});
}
@Test()
public void buildAsyncClientWithHttpClientWithCredential() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildAsyncClientWithCustomPipeline() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderCustomPipeline(httpPipeline).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildAsyncClientWithLogOptions() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildAsyncClientWithConfiguration() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateConfiguration(spyHelper, configuration);
}
@Test()
public void buildAsyncClientWithServiceVersion() {
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
}
@Test()
public void buildAsyncClientWithOneAdditionalPolicy() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildAsyncClientWithMultipleAdditionalPolicies() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildAsyncClientNoEndpointThrows() {
    // Building the ASYNC client without an endpoint must fail fast.
    // Fixed copy-paste bug: this async test previously called buildClient().
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.buildAsyncClient();
    });
}
@Test()
public void buildAsyncClientNoPipelineNoCredentialsThrows() {
    // Endpoint and transport alone are not enough for the ASYNC client either.
    // Fixed copy-paste bug: this async test previously called buildClient().
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildAsyncClient();
    });
}
@Test()
public void setEndpointNullThrows() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(null);
});
}
@Test()
public void addPolicyNullThrows() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.addPolicy(null);
});
}
// Fixed: @Test was duplicated; @Test is not @Repeatable, so a duplicated
// annotation is a compile error.
@Test
public void bothRetryOptionsAndRetryPolicySetAsync() {
    // retryOptions and retryPolicy are mutually exclusive; the builder must
    // reject the ambiguous configuration when the async client is built.
    assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
        .retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
        .retryPolicy(new RetryPolicy())
        .buildAsyncClient());
}
private PhoneNumbersClientBuilder setupBuilderWithHttpClientWithCredential(PhoneNumbersClientBuilder clientBuilder) {
return clientBuilder
.endpoint(ENDPOINT)
.httpClient(this.httpClient)
.credential(new AzureKeyCredential(ACCESSKEY));
}
private PhoneNumbersClientBuilder setupBuilderWithPolicies(
PhoneNumbersClientBuilder clientBuilder, List<HttpPipelinePolicy> policies) {
clientBuilder = this.setupBuilderWithHttpClientWithCredential(clientBuilder);
for (HttpPipelinePolicy policy : policies) {
clientBuilder.addPolicy(policy);
}
return clientBuilder;
}
private PhoneNumbersClientBuilder setupBuilderCustomPipeline(HttpPipeline pipeline) {
return clientBuilder
.endpoint(ENDPOINT)
.pipeline(pipeline);
}
private void validateRequiredSettings(ClientBuilderSpyHelper spyHelper) {
spyHelper.capturePhoneNumberAdminClientImpl();
spyHelper.captureHttpPipelineSettings();
PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue();
assertEquals(ENDPOINT, phoneNumberManagementClient.getEndpoint());
assertEquals(this.httpClient, phoneNumberManagementClient.getHttpPipeline().getHttpClient());
assertEquals(6, phoneNumberManagementClient.getHttpPipeline().getPolicyCount());
assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0));
assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1));
assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3));
assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4));
assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(5));
assertEquals(spyHelper.defaultHttpLogOptionsRef.get(), spyHelper.httpLogOptionsArg.getValue());
assertEquals(spyHelper.defaultHttpLogOptionsRef.get().getApplicationId(), spyHelper.uaPolicyAppIdArg.getValue());
assertEquals(PROPERTIES.get((SDK_NAME)), spyHelper.uaPolicySdkNameArg.getValue());
assertEquals(PROPERTIES.get((SDK_VERSION)), spyHelper.uaPolicySdkVersionArg.getValue());
assertNull(spyHelper.uaPolicyConfigArg.getValue());
}
private void validateCustomPipeline(ClientBuilderSpyHelper spyHelper, HttpPipeline expectedPipeline) {
spyHelper.capturePhoneNumberAdminClientImpl();
PhoneNumberAdminClientImpl phoneNumberAdminClient = spyHelper.phoneNumberAdminClientArg.getValue();
assertEquals(expectedPipeline, phoneNumberAdminClient.getHttpPipeline());
}
private void validateLogOptions(ClientBuilderSpyHelper spyHelper, HttpLogOptions expectedLogOptions) {
spyHelper.captureHttpPipelineSettings();
HttpLogOptions actualLogOptions = spyHelper.httpLogOptionsArg.getValue();
assertEquals(expectedLogOptions, actualLogOptions);
}
private void validateConfiguration(ClientBuilderSpyHelper spyHelper, Configuration expectedConfiguration) {
spyHelper.captureHttpPipelineSettings();
Configuration actualConfiguration = spyHelper.uaPolicyConfigArg.getValue();
assertEquals(expectedConfiguration, actualConfiguration);
}
private void validateAdditionalPolicies(ClientBuilderSpyHelper spyHelper, List<HttpPipelinePolicy> policies) {
spyHelper.capturePhoneNumberAdminClientImpl();
PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue();
int expectedPolicyCount = 6 + policies.size();
int lastPolicyIndex = expectedPolicyCount - 1;
int customPolicyIndex = 5;
assertEquals(expectedPolicyCount, phoneNumberManagementClient.getHttpPipeline().getPolicyCount());
assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0));
assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1));
assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3));
assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4));
assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(lastPolicyIndex));
for (HttpPipelinePolicy policy : policies) {
assertEquals(policy, phoneNumberManagementClient.getHttpPipeline().getPolicy(customPolicyIndex));
customPolicyIndex++;
}
}
private class ClientBuilderSpyHelper {
final PhoneNumbersClientBuilder clientBuilder;
final AtomicReference<HmacAuthenticationPolicy> authenticationPolicyRef = new AtomicReference<>();
final AtomicReference<UserAgentPolicy> userAgentPolicyRef = new AtomicReference<>();
final AtomicReference<RequestIdPolicy> requestIdPolicyRef = new AtomicReference<>();
final AtomicReference<RetryPolicy> retryPolicyRef = new AtomicReference<>();
final AtomicReference<CookiePolicy> cookiePolicyRef = new AtomicReference<>();
final AtomicReference<HttpLoggingPolicy> httpLoggingPolicyRef = new AtomicReference<>();
final AtomicReference<HttpLogOptions> defaultHttpLogOptionsRef = new AtomicReference<>();
final ArgumentCaptor<PhoneNumberAdminClientImpl> phoneNumberAdminClientArg =
ArgumentCaptor.forClass(PhoneNumberAdminClientImpl.class);
final ArgumentCaptor<String> uaPolicyAppIdArg = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<String> uaPolicySdkNameArg = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<String> uaPolicySdkVersionArg = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<Configuration> uaPolicyConfigArg = ArgumentCaptor.forClass(Configuration.class);
final ArgumentCaptor<HttpLogOptions> httpLogOptionsArg = ArgumentCaptor.forClass(HttpLogOptions.class);
ClientBuilderSpyHelper(PhoneNumbersClientBuilder clientBuilder) {
this.clientBuilder = clientBuilder;
this.initializeSpies();
}
private void initializeSpies() {
Answer<HmacAuthenticationPolicy> createCommunicationClientCredentialPolicy = (invocation) -> {
this.authenticationPolicyRef.set((HmacAuthenticationPolicy) invocation.callRealMethod());
return this.authenticationPolicyRef.get();
};
doAnswer(createCommunicationClientCredentialPolicy).when(this.clientBuilder).createAuthenticationPolicy();
Answer<UserAgentPolicy> createUserAgentPolicy = (invocation) -> {
this.userAgentPolicyRef.set(mock(UserAgentPolicy.class));
return this.userAgentPolicyRef.get();
};
doAnswer(createUserAgentPolicy).when(this.clientBuilder).createUserAgentPolicy(any(), any(), any(), any());
Answer<RequestIdPolicy> createRequestIdPolicy = (invocation) -> {
this.requestIdPolicyRef.set(mock(RequestIdPolicy.class));
return this.requestIdPolicyRef.get();
};
doAnswer(createRequestIdPolicy).when(this.clientBuilder).createRequestIdPolicy();
Answer<CookiePolicy> createCookiePolicy = (invocation) -> {
this.cookiePolicyRef.set((CookiePolicy) invocation.callRealMethod());
return this.cookiePolicyRef.get();
};
doAnswer(createCookiePolicy).when(this.clientBuilder).createCookiePolicy();
Answer<HttpLoggingPolicy> createHttpLoggingPolicy = (invocation) -> {
this.httpLoggingPolicyRef.set((HttpLoggingPolicy) invocation.callRealMethod());
return this.httpLoggingPolicyRef.get();
};
doAnswer(createHttpLoggingPolicy).when(this.clientBuilder).createHttpLoggingPolicy(any());
Answer<HttpLogOptions> createDefaultHttpLogOptions = (invocation) -> {
this.defaultHttpLogOptionsRef.set((HttpLogOptions) invocation.callRealMethod());
return this.defaultHttpLogOptionsRef.get();
};
doAnswer(createDefaultHttpLogOptions).when(this.clientBuilder).createDefaultHttpLogOptions();
}
void capturePhoneNumberAdminClientImpl() {
verify(this.clientBuilder, times(1))
.createPhoneNumberAsyncClient(this.phoneNumberAdminClientArg.capture());
}
void captureHttpPipelineSettings() {
verify(this.clientBuilder, times(1))
.createAuthenticationPolicy();
verify(this.clientBuilder, times(1))
.createUserAgentPolicy(
this.uaPolicyAppIdArg.capture(),
this.uaPolicySdkNameArg.capture(),
this.uaPolicySdkVersionArg.capture(),
this.uaPolicyConfigArg.capture());
verify(this.clientBuilder, times(1))
.createHttpLoggingPolicy(this.httpLogOptionsArg.capture());
}
}
} | class PhoneNumbersClientBuilderTest {
private static final String ENDPOINT = "https:
private static final String ACCESSKEY = "QWNjZXNzS2V5";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-communication-phonenumbers.properties");
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private HttpClient httpClient;
private PhoneNumbersClientBuilder clientBuilder;
@BeforeEach
void setUp() {
this.httpClient = mock(HttpClient.class);
this.clientBuilder = Mockito.spy(new PhoneNumbersClientBuilder());
}
@AfterEach
void tearDown() {
Mockito.framework().clearInlineMock(this);
}
@Test()
public void buildClientWithHttpClientWithCredential() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildClient();
assertNotNull(phoneNumberClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildClientWithCustomPipeline() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpPipeline httpPipeline = mock(HttpPipeline.class);
PhoneNumbersClient phoneNumberClient =
this.setupBuilderCustomPipeline(httpPipeline).buildClient();
assertNotNull(phoneNumberClient);
validateCustomPipeline(spyHelper, httpPipeline);
}
@Test()
public void buildClientWithLogOptions() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
HttpLogOptions logOptions = mock(HttpLogOptions.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.httpLogOptions(logOptions)
.buildClient();
assertNotNull(phoneNumberClient);
validateLogOptions(spyHelper, logOptions);
}
@Test()
public void buildClientWithConfiguration() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
Configuration configuration = mock(Configuration.class);
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.configuration(configuration)
.buildClient();
assertNotNull(phoneNumberClient);
validateConfiguration(spyHelper, configuration);
}
@Test()
public void buildClientWithServiceVersion() {
PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
.serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
.buildClient();
assertNotNull(phoneNumberClient);
}
@Test()
public void buildClientWithOneAdditionalPolicy() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientWithMultipleAdditionalPolicies() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
additionalPolicies.add(mock(HttpPipelinePolicy.class));
PhoneNumbersClient phoneNumberClient =
this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies)
.buildClient();
assertNotNull(phoneNumberClient);
validateAdditionalPolicies(spyHelper, additionalPolicies);
}
@Test()
public void buildClientNoEndpoint() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.buildClient();
});
}
@Test()
public void buildClientNoPipelineNoCredentials() {
assertThrows(NullPointerException.class, () -> {
this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildClient();
});
}
@Test()
public void buildAsyncClientWithHttpClientWithCredential() {
ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder);
PhoneNumbersAsyncClient phoneNumberAsyncClient =
this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildAsyncClient();
assertNotNull(phoneNumberAsyncClient);
validateRequiredSettings(spyHelper);
}
@Test()
public void buildAsyncClientWithCustomPipeline() {
    // A caller-provided pipeline must be used verbatim instead of the default one.
    ClientBuilderSpyHelper spy = new ClientBuilderSpyHelper(this.clientBuilder);
    HttpPipeline customPipeline = mock(HttpPipeline.class);
    PhoneNumbersAsyncClient asyncClient =
        this.setupBuilderCustomPipeline(customPipeline).buildAsyncClient();
    assertNotNull(asyncClient);
    validateCustomPipeline(spy, customPipeline);
}
@Test()
public void buildAsyncClientWithLogOptions() {
    // Custom HTTP log options should be forwarded to the logging-policy factory.
    ClientBuilderSpyHelper spy = new ClientBuilderSpyHelper(this.clientBuilder);
    HttpLogOptions customLogOptions = mock(HttpLogOptions.class);
    PhoneNumbersAsyncClient asyncClient =
        this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
            .httpLogOptions(customLogOptions)
            .buildAsyncClient();
    assertNotNull(asyncClient);
    validateLogOptions(spy, customLogOptions);
}
@Test()
public void buildAsyncClientWithConfiguration() {
    // A custom Configuration should be forwarded to the user-agent policy factory.
    ClientBuilderSpyHelper spy = new ClientBuilderSpyHelper(this.clientBuilder);
    Configuration customConfiguration = mock(Configuration.class);
    PhoneNumbersAsyncClient asyncClient =
        this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
            .configuration(customConfiguration)
            .buildAsyncClient();
    assertNotNull(asyncClient);
    validateConfiguration(spy, customConfiguration);
}
@Test()
public void buildAsyncClientWithServiceVersion() {
    // Pinning an explicit service version must not prevent async client construction.
    PhoneNumbersAsyncClient asyncClient =
        this.setupBuilderWithHttpClientWithCredential(this.clientBuilder)
            .serviceVersion(PhoneNumbersServiceVersion.V2021_03_07)
            .buildAsyncClient();
    assertNotNull(asyncClient);
}
@Test()
public void buildAsyncClientWithOneAdditionalPolicy() {
    // A single user-supplied policy should be inserted into the async client's pipeline.
    ClientBuilderSpyHelper spy = new ClientBuilderSpyHelper(this.clientBuilder);
    List<HttpPipelinePolicy> extraPolicies = new ArrayList<>();
    extraPolicies.add(mock(HttpPipelinePolicy.class));
    PhoneNumbersAsyncClient asyncClient =
        this.setupBuilderWithPolicies(this.clientBuilder, extraPolicies).buildAsyncClient();
    assertNotNull(asyncClient);
    validateAdditionalPolicies(spy, extraPolicies);
}
@Test()
public void buildAsyncClientWithMultipleAdditionalPolicies() {
    // Several user-supplied policies should all land in the async pipeline, in order.
    ClientBuilderSpyHelper spy = new ClientBuilderSpyHelper(this.clientBuilder);
    List<HttpPipelinePolicy> extraPolicies = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        extraPolicies.add(mock(HttpPipelinePolicy.class));
    }
    PhoneNumbersAsyncClient asyncClient =
        this.setupBuilderWithPolicies(this.clientBuilder, extraPolicies).buildAsyncClient();
    assertNotNull(asyncClient);
    validateAdditionalPolicies(spy, extraPolicies);
}
@Test()
public void buildAsyncClientNoEndpointThrows() {
    // Bug fix: this async test previously invoked buildClient(), so the async build path
    // was never exercised. It must validate that buildAsyncClient() fails without an endpoint.
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.buildAsyncClient();
    });
}
@Test()
public void buildAsyncClientNoPipelineNoCredentialsThrows() {
    // Bug fix: this async test previously invoked buildClient(); the async build path must be
    // the one validated to fail when neither credentials nor a pipeline are configured.
    assertThrows(NullPointerException.class, () -> {
        this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildAsyncClient();
    });
}
@Test()
public void setEndpointNullThrows() {
    // The endpoint setter is null-hostile.
    assertThrows(NullPointerException.class, () -> this.clientBuilder.endpoint(null));
}
@Test()
public void addPolicyNullThrows() {
    // addPolicy rejects null policies.
    assertThrows(NullPointerException.class, () -> this.clientBuilder.addPolicy(null));
}
// Fix: the @Test annotation was duplicated, which does not compile (@Test is not repeatable).
@Test
public void bothRetryOptionsAndRetryPolicySetAsync() {
    // Configuring both retryOptions and retryPolicy is ambiguous and must be rejected at build time.
    assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
        .retryOptions(new RetryOptions(new ExponentialBackoffOptions()))
        .retryPolicy(new RetryPolicy())
        .buildAsyncClient());
}
// Applies the minimal required settings: endpoint, transport, and key credential.
private PhoneNumbersClientBuilder setupBuilderWithHttpClientWithCredential(PhoneNumbersClientBuilder clientBuilder) {
    return clientBuilder
        .credential(new AzureKeyCredential(ACCESSKEY))
        .httpClient(this.httpClient)
        .endpoint(ENDPOINT);
}
// Applies the required settings, then registers each caller-supplied policy in order.
private PhoneNumbersClientBuilder setupBuilderWithPolicies(
    PhoneNumbersClientBuilder clientBuilder, List<HttpPipelinePolicy> policies) {
    PhoneNumbersClientBuilder configured = this.setupBuilderWithHttpClientWithCredential(clientBuilder);
    policies.forEach(configured::addPolicy);
    return configured;
}
// Only endpoint + pipeline: credentials are not required when a complete pipeline is supplied.
private PhoneNumbersClientBuilder setupBuilderCustomPipeline(HttpPipeline pipeline) {
    return clientBuilder.endpoint(ENDPOINT).pipeline(pipeline);
}
// Captures the admin client handed to the factory and verifies the default pipeline layout
// (six policies) plus the user-agent/log-option values the builder supplied.
private void validateRequiredSettings(ClientBuilderSpyHelper spyHelper) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    spyHelper.captureHttpPipelineSettings();
    PhoneNumberAdminClientImpl adminClient = spyHelper.phoneNumberAdminClientArg.getValue();
    HttpPipeline pipeline = adminClient.getHttpPipeline();
    assertEquals(ENDPOINT, adminClient.getEndpoint());
    assertEquals(this.httpClient, pipeline.getHttpClient());
    // Index 2 is intentionally not asserted here (not one of the spied policies —
    // presumably the retry policy; confirm against the builder implementation).
    assertEquals(6, pipeline.getPolicyCount());
    assertEquals(spyHelper.userAgentPolicyRef.get(), pipeline.getPolicy(0));
    assertEquals(spyHelper.requestIdPolicyRef.get(), pipeline.getPolicy(1));
    assertEquals(spyHelper.authenticationPolicyRef.get(), pipeline.getPolicy(3));
    assertEquals(spyHelper.cookiePolicyRef.get(), pipeline.getPolicy(4));
    assertEquals(spyHelper.httpLoggingPolicyRef.get(), pipeline.getPolicy(5));
    assertEquals(spyHelper.defaultHttpLogOptionsRef.get(), spyHelper.httpLogOptionsArg.getValue());
    assertEquals(spyHelper.defaultHttpLogOptionsRef.get().getApplicationId(), spyHelper.uaPolicyAppIdArg.getValue());
    assertEquals(PROPERTIES.get(SDK_NAME), spyHelper.uaPolicySdkNameArg.getValue());
    assertEquals(PROPERTIES.get(SDK_VERSION), spyHelper.uaPolicySdkVersionArg.getValue());
    assertNull(spyHelper.uaPolicyConfigArg.getValue());
}
// The built client must carry exactly the pipeline supplied by the caller.
private void validateCustomPipeline(ClientBuilderSpyHelper spyHelper, HttpPipeline expectedPipeline) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    assertEquals(expectedPipeline, spyHelper.phoneNumberAdminClientArg.getValue().getHttpPipeline());
}
// The logging-policy factory must have received the caller's log options.
private void validateLogOptions(ClientBuilderSpyHelper spyHelper, HttpLogOptions expectedLogOptions) {
    spyHelper.captureHttpPipelineSettings();
    assertEquals(expectedLogOptions, spyHelper.httpLogOptionsArg.getValue());
}
// The user-agent policy factory must have received the caller's Configuration.
private void validateConfiguration(ClientBuilderSpyHelper spyHelper, Configuration expectedConfiguration) {
    spyHelper.captureHttpPipelineSettings();
    assertEquals(expectedConfiguration, spyHelper.uaPolicyConfigArg.getValue());
}
// Custom policies are appended after the cookie policy (index 4); the logging policy always
// sits last, so the total count grows by the number of custom policies.
private void validateAdditionalPolicies(ClientBuilderSpyHelper spyHelper, List<HttpPipelinePolicy> policies) {
    spyHelper.capturePhoneNumberAdminClientImpl();
    HttpPipeline pipeline = spyHelper.phoneNumberAdminClientArg.getValue().getHttpPipeline();
    int expectedPolicyCount = 6 + policies.size();
    assertEquals(expectedPolicyCount, pipeline.getPolicyCount());
    assertEquals(spyHelper.userAgentPolicyRef.get(), pipeline.getPolicy(0));
    assertEquals(spyHelper.requestIdPolicyRef.get(), pipeline.getPolicy(1));
    assertEquals(spyHelper.authenticationPolicyRef.get(), pipeline.getPolicy(3));
    assertEquals(spyHelper.cookiePolicyRef.get(), pipeline.getPolicy(4));
    assertEquals(spyHelper.httpLoggingPolicyRef.get(), pipeline.getPolicy(expectedPolicyCount - 1));
    for (int i = 0; i < policies.size(); i++) {
        assertEquals(policies.get(i), pipeline.getPolicy(5 + i));
    }
}
/**
 * Installs Mockito answers on the builder so the policies and settings it creates during
 * build() can be captured and asserted on later.
 * NOTE(review): assumes {@code clientBuilder} is a Mockito spy — doAnswer/verify would fail
 * on a plain instance; confirm against the enclosing test class's setup.
 */
private class ClientBuilderSpyHelper {
    final PhoneNumbersClientBuilder clientBuilder;
    // Policies recorded (or mocked) when the builder's factory methods are invoked.
    final AtomicReference<HmacAuthenticationPolicy> authenticationPolicyRef = new AtomicReference<>();
    final AtomicReference<UserAgentPolicy> userAgentPolicyRef = new AtomicReference<>();
    final AtomicReference<RequestIdPolicy> requestIdPolicyRef = new AtomicReference<>();
    final AtomicReference<RetryPolicy> retryPolicyRef = new AtomicReference<>();
    final AtomicReference<CookiePolicy> cookiePolicyRef = new AtomicReference<>();
    final AtomicReference<HttpLoggingPolicy> httpLoggingPolicyRef = new AtomicReference<>();
    final AtomicReference<HttpLogOptions> defaultHttpLogOptionsRef = new AtomicReference<>();
    // Captors for the arguments the builder passes into its factory methods.
    final ArgumentCaptor<PhoneNumberAdminClientImpl> phoneNumberAdminClientArg =
        ArgumentCaptor.forClass(PhoneNumberAdminClientImpl.class);
    final ArgumentCaptor<String> uaPolicyAppIdArg = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> uaPolicySdkNameArg = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> uaPolicySdkVersionArg = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<Configuration> uaPolicyConfigArg = ArgumentCaptor.forClass(Configuration.class);
    final ArgumentCaptor<HttpLogOptions> httpLogOptionsArg = ArgumentCaptor.forClass(HttpLogOptions.class);
    ClientBuilderSpyHelper(PhoneNumbersClientBuilder clientBuilder) {
        this.clientBuilder = clientBuilder;
        this.initializeSpies();
    }
    // Stubs each policy factory: authentication/cookie/logging policies are created for real
    // (callRealMethod) and recorded; user-agent and request-id policies are replaced by mocks
    // so identity comparisons in the validators work by reference.
    private void initializeSpies() {
        // Record the real authentication policy.
        Answer<HmacAuthenticationPolicy> createCommunicationClientCredentialPolicy = (invocation) -> {
            this.authenticationPolicyRef.set((HmacAuthenticationPolicy) invocation.callRealMethod());
            return this.authenticationPolicyRef.get();
        };
        doAnswer(createCommunicationClientCredentialPolicy).when(this.clientBuilder).createAuthenticationPolicy();
        // Substitute a mock user-agent policy.
        Answer<UserAgentPolicy> createUserAgentPolicy = (invocation) -> {
            this.userAgentPolicyRef.set(mock(UserAgentPolicy.class));
            return this.userAgentPolicyRef.get();
        };
        doAnswer(createUserAgentPolicy).when(this.clientBuilder).createUserAgentPolicy(any(), any(), any(), any());
        // Substitute a mock request-id policy.
        Answer<RequestIdPolicy> createRequestIdPolicy = (invocation) -> {
            this.requestIdPolicyRef.set(mock(RequestIdPolicy.class));
            return this.requestIdPolicyRef.get();
        };
        doAnswer(createRequestIdPolicy).when(this.clientBuilder).createRequestIdPolicy();
        // Record the real cookie policy.
        Answer<CookiePolicy> createCookiePolicy = (invocation) -> {
            this.cookiePolicyRef.set((CookiePolicy) invocation.callRealMethod());
            return this.cookiePolicyRef.get();
        };
        doAnswer(createCookiePolicy).when(this.clientBuilder).createCookiePolicy();
        // Record the real HTTP logging policy.
        Answer<HttpLoggingPolicy> createHttpLoggingPolicy = (invocation) -> {
            this.httpLoggingPolicyRef.set((HttpLoggingPolicy) invocation.callRealMethod());
            return this.httpLoggingPolicyRef.get();
        };
        doAnswer(createHttpLoggingPolicy).when(this.clientBuilder).createHttpLoggingPolicy(any());
        // Record the default log options the builder falls back to when none are supplied.
        Answer<HttpLogOptions> createDefaultHttpLogOptions = (invocation) -> {
            this.defaultHttpLogOptionsRef.set((HttpLogOptions) invocation.callRealMethod());
            return this.defaultHttpLogOptionsRef.get();
        };
        doAnswer(createDefaultHttpLogOptions).when(this.clientBuilder).createDefaultHttpLogOptions();
    }
    // Captures the PhoneNumberAdminClientImpl handed to the async-client factory (exactly once).
    void capturePhoneNumberAdminClientImpl() {
        verify(this.clientBuilder, times(1))
            .createPhoneNumberAsyncClient(this.phoneNumberAdminClientArg.capture());
    }
    // Captures the arguments of the pipeline-related factory calls for later assertions.
    void captureHttpPipelineSettings() {
        verify(this.clientBuilder, times(1))
            .createAuthenticationPolicy();
        verify(this.clientBuilder, times(1))
            .createUserAgentPolicy(
                this.uaPolicyAppIdArg.capture(),
                this.uaPolicySdkNameArg.capture(),
                this.uaPolicySdkVersionArg.capture(),
                this.uaPolicyConfigArg.capture());
        verify(this.clientBuilder, times(1))
            .createHttpLoggingPolicy(this.httpLogOptionsArg.capture());
    }
}
} |
Now do we have tests for all four bulk overloads , 2 for async and 2 for sync ? | public void crudOnDifferentOverload() {
List<EncryptionPojo> actualProperties = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties);
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
actualProperties.add(properties);
properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse1 = this.cosmosEncryptionContainer.createItem(properties, new CosmosItemRequestOptions());
assertThat(itemResponse1.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem1 = itemResponse1.getItem();
validateResponse(properties, responseItem1);
actualProperties.add(properties);
properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> upsertResponse1 = this.cosmosEncryptionContainer.upsertItem(properties);
assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem2 = upsertResponse1.getItem();
validateResponse(properties, responseItem2);
actualProperties.add(properties);
properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> upsertResponse2 = this.cosmosEncryptionContainer.upsertItem(properties, new CosmosItemRequestOptions());
assertThat(upsertResponse2.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem3 = upsertResponse2.getItem();
validateResponse(properties, responseItem3);
actualProperties.add(properties);
EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(),
new PartitionKey(actualProperties.get(0).getMypk()), EncryptionPojo.class).getItem();
validateResponse(actualProperties.get(0), readItem);
String query = String.format("SELECT * from c where c.id = '%s'", actualProperties.get(1).getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = new ArrayList<>();
feedResponseIterator.iterator().forEachRemaining(pojo -> {
feedResponse.add(pojo);
});
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem);
}
}
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<EncryptionPojo> feedResponseIterator2 =
this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = new ArrayList<>();
feedResponseIterator2.iterator().forEachRemaining(pojo -> {
feedResponse2.add(pojo);
});
assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse2) {
if (pojo.getId().equals(properties.getId())) {
EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem);
}
}
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
CosmosItemResponse<EncryptionPojo> replaceResponse =
this.cosmosEncryptionContainer.replaceItem(actualProperties.get(2), actualProperties.get(2).getId(),
new PartitionKey(actualProperties.get(2).getMypk()), requestOptions);
assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(actualProperties.get(2), responseItem);
CosmosItemResponse<?> deleteResponse1 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(1).getId(),
new PartitionKey(actualProperties.get(1).getMypk()), new CosmosItemRequestOptions());
assertThat(deleteResponse1.getStatusCode()).isEqualTo(204);
CosmosItemResponse<?> deleteResponse2 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(2),
new CosmosItemRequestOptions());
assertThat(deleteResponse2.getStatusCode()).isEqualTo(204);
CosmosItemResponse<?> deleteResponse3 = this.cosmosEncryptionContainer.deleteAllItemsByPartitionKey(new PartitionKey(actualProperties.get(3).getMypk()),
new CosmosItemRequestOptions());
assertThat(deleteResponse3.getStatusCode()).isEqualTo(200);
} | EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(), | public void crudOnDifferentOverload() {
List<EncryptionPojo> actualProperties = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties);
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
actualProperties.add(properties);
properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse1 = this.cosmosEncryptionContainer.createItem(properties, new CosmosItemRequestOptions());
assertThat(itemResponse1.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem1 = itemResponse1.getItem();
validateResponse(properties, responseItem1);
actualProperties.add(properties);
properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> upsertResponse1 = this.cosmosEncryptionContainer.upsertItem(properties);
assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem2 = upsertResponse1.getItem();
validateResponse(properties, responseItem2);
actualProperties.add(properties);
properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> upsertResponse2 = this.cosmosEncryptionContainer.upsertItem(properties, new CosmosItemRequestOptions());
assertThat(upsertResponse2.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem3 = upsertResponse2.getItem();
validateResponse(properties, responseItem3);
actualProperties.add(properties);
EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(),
new PartitionKey(actualProperties.get(0).getMypk()), EncryptionPojo.class).getItem();
validateResponse(actualProperties.get(0), readItem);
String query = String.format("SELECT * from c where c.id = '%s'", actualProperties.get(1).getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = new ArrayList<>();
feedResponseIterator.iterator().forEachRemaining(pojo -> {
feedResponse.add(pojo);
});
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem);
}
}
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<EncryptionPojo> feedResponseIterator2 =
this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = new ArrayList<>();
feedResponseIterator2.iterator().forEachRemaining(pojo -> {
feedResponse2.add(pojo);
});
assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse2) {
if (pojo.getId().equals(properties.getId())) {
EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem);
}
}
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
CosmosItemResponse<EncryptionPojo> replaceResponse =
this.cosmosEncryptionContainer.replaceItem(actualProperties.get(2), actualProperties.get(2).getId(),
new PartitionKey(actualProperties.get(2).getMypk()), requestOptions);
assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(actualProperties.get(2), responseItem);
CosmosItemResponse<?> deleteResponse1 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(1).getId(),
new PartitionKey(actualProperties.get(1).getMypk()), new CosmosItemRequestOptions());
assertThat(deleteResponse1.getStatusCode()).isEqualTo(204);
CosmosItemResponse<?> deleteResponse2 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(2),
new CosmosItemRequestOptions());
assertThat(deleteResponse2.getStatusCode()).isEqualTo(204);
CosmosItemResponse<?> deleteResponse3 = this.cosmosEncryptionContainer.deleteAllItemsByPartitionKey(new PartitionKey(actualProperties.get(3).getMypk()),
new CosmosItemRequestOptions());
assertThat(deleteResponse3.getStatusCode()).isEqualTo(200);
} | class EncryptionSyncApiCrudTest extends TestSuiteBase {
// Raw client, its encryption wrapper, and the shared encrypted container under test;
// all are initialized in before_CosmosItemTest.
private CosmosClient client;
private CosmosEncryptionClient cosmosEncryptionClient;
private CosmosEncryptionContainer cosmosEncryptionContainer;
// TestNG factory: one test instance per client-builder configuration (session consistency variants).
@Factory(dataProvider = "clientBuildersWithSessionConsistency")
public EncryptionSyncApiCrudTest(CosmosClientBuilder clientBuilder) {
    super(clientBuilder);
}
// Builds the sync client, wraps it with encryption support using the test key store
// provider, and grabs the shared encrypted container used by every test in this class.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
    assertThat(this.client).isNull();
    this.client = getClientBuilder().buildClient();
    EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider encryptionKeyStoreProvider =
        new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider();
    this.cosmosEncryptionClient = CosmosEncryptionClient.createCosmosEncryptionClient(this.client,
        encryptionKeyStoreProvider);
    this.cosmosEncryptionContainer = getSharedSyncEncryptionContainer(this.cosmosEncryptionClient);
}
// Closes the plain client created in before_CosmosItemTest (runs even on setup failure).
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    assertThat(this.client).isNotNull();
    this.client.close();
}
// Round-trips an item through create/read to verify encryption and decryption, then repeats
// the create with a very large (10k char) sensitive string.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(),
        new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).getItem();
    validateResponse(properties, readItem);
    properties = getItem(UUID.randomUUID().toString());
    // Fix: build the 10k-char payload with StringBuilder instead of O(n^2) string concatenation.
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// Upserting a new item should create it; reading it back must decrypt to the original values.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.upsertItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(),
        new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).getItem();
    validateResponse(properties, readItem);
}
// Creates an item, queries it back by id via the SqlQuerySpec overload, and verifies the
// results decrypt to the original document.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions,
            EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = new ArrayList<>();
    feedResponseIterator.iterator().forEachRemaining(pojo -> {
        feedResponse.add(pojo);
    });
    // Only a lower bound is asserted; the response is then filtered by id before validating.
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// Queries on encrypted fields: parameters targeting encrypted paths must be registered via
// SqlQuerySpecWithEncryption.addEncryptionParameter so the SDK can encrypt them before sending.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // Plain (non-encrypted) parameter goes straight onto the query spec.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Encrypted-path parameters are added through the encryption wrapper instead.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = new ArrayList<>();
    feedResponseIterator.iterator().forEachRemaining(pojo -> {
        feedResponse.add(pojo);
    });
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// Negative test: filtering on a randomized-encrypted path (/sensitiveDouble) is not
// supported and must fail when the results are iterated.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // The failure surfaces lazily, on iteration — not when the iterable is created.
        List<EncryptionPojo> feedResponse = new ArrayList<>();
        feedResponseIterator.iterator().forEachRemaining(pojo -> {
            feedResponse.add(pojo);
        });
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
// Pages through a three-item result set one document per page using continuation tokens,
// and verifies every document is eventually returned exactly once.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions());
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions());
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions());
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedIterable<EncryptionPojo> pojoCosmosPagedIterable =
        this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Fix: pass pageSize instead of a hard-coded 1, so the per-page assertion below
        // stays consistent if pageSize is ever changed.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            pojoCosmosPagedIterable.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            assertThat(fr.getResults().size()).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Fix: the @Test annotation was duplicated, which does not compile (@Test is not repeatable).
// Runs create/replace/upsert/read/delete as a single transactional batch on one partition key
// and validates per-operation status codes and decrypted payloads.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosBatch.createItemOperation(createPojo);
    cosmosBatch.replaceItemOperation(itemId, replacePojo);
    cosmosBatch.upsertItemOperation(createPojo);
    cosmosBatch.readItemOperation(itemId);
    cosmosBatch.deleteItemOperation(itemId);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch);
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
/**
 * Same transactional-batch scenario as {@code batchExecution}, but exercising the
 * per-operation ({@code CosmosBatchItemRequestOptions}) and per-batch
 * ({@code CosmosBatchRequestOptions}) overloads.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String id = UUID.randomUUID().toString();
    EncryptionPojo original = getItem(id);
    EncryptionPojo replacement = getItem(id);
    replacement.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(id));
    CosmosBatchItemRequestOptions perOpOptions = new CosmosBatchItemRequestOptions();
    batch.createItemOperation(original, perOpOptions);
    batch.replaceItemOperation(id, replacement, perOpOptions);
    batch.upsertItemOperation(original, perOpOptions);
    batch.readItemOperation(id, perOpOptions);
    batch.deleteItemOperation(id, perOpOptions);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(batch, new CosmosBatchRequestOptions());
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    // Expected status per operation, in submission order.
    int[] expectedStatusCodes = new int[] {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // The delete result (index 4) has no body, so only the first four items are validated.
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), original);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacement);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), original);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), original);
}
/**
 * Patches an encrypted item with add/remove/replace/set operations across several
 * sensitive paths, then re-reads the item to confirm the patched state round-trips
 * through encryption.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void patchItem() {
    String id = UUID.randomUUID().toString();
    EncryptionPojo original = getItem(id);
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionContainer.createItem(original,
        new PartitionKey(original.getMypk()), new CosmosItemRequestOptions());
    int bumpedSensitiveInt = original.getSensitiveInt() + 1;
    EncryptionPojo replacementNestedPojo = getItem(UUID.randomUUID().toString());
    replacementNestedPojo.setSensitiveString("testing");
    CosmosPatchOperations patchOps = CosmosPatchOperations.create();
    patchOps.add("/sensitiveString", "patched");
    patchOps.remove("/sensitiveDouble");
    patchOps.replace("/sensitiveInt", bumpedSensitiveInt);
    patchOps.replace("/sensitiveNestedPojo", replacementNestedPojo);
    patchOps.set("/sensitiveBoolean", false);
    CosmosPatchItemRequestOptions patchOptions = new CosmosPatchItemRequestOptions();
    CosmosItemResponse<EncryptionPojo> patchResponse = this.cosmosEncryptionContainer.patchItem(
        original.getId(),
        new PartitionKey(original.getMypk()),
        patchOps,
        patchOptions,
        EncryptionPojo.class);
    assertThat(patchResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    EncryptionPojo patchedItem = patchResponse.getItem();
    assertThat(patchedItem).isNotNull();
    assertThat(patchedItem.getSensitiveString()).isEqualTo("patched");
    assertThat(patchedItem.getSensitiveDouble()).isNull();
    assertThat(patchedItem.getSensitiveNestedPojo()).isNotNull();
    assertThat(patchedItem.getSensitiveInt()).isEqualTo(bumpedSensitiveInt);
    assertThat(patchedItem.isSensitiveBoolean()).isEqualTo(false);
    // A fresh read must observe exactly the patched state.
    CosmosItemResponse<EncryptionPojo> readResponse = this.cosmosEncryptionContainer.readItem(
        original.getId(),
        new PartitionKey(original.getMypk()),
        patchOptions,
        EncryptionPojo.class);
    assertThat(readResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    validateResponse(patchedItem, readResponse.getItem());
}
/**
 * Picks a pseudo-random operation count in [120, 219] so the bulk tests cover varying
 * batch sizes across runs.
 *
 * @return the number of operations the calling bulk test should issue
 */
private int getTotalRequest() {
    int countRequest = new Random().nextInt(100) + 120;
    // Parameterized logging avoids eager string concatenation.
    logger.info("Total count of request for this test case: {}", countRequest);
    return countRequest;
}
/**
 * Bulk-creates a random number of encrypted items and verifies each response: 201 status,
 * positive RU charge, diagnostics/session/activity metadata present, and a payload that
 * decrypts back to the original POJO.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_createItem() {
    int totalRequest = getTotalRequest();
    Map<String, EncryptionPojo> idToItemMap = new HashMap<>();
    List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        idToItemMap.put(itemId, createPojo);
        cosmosItemOperationsList.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    // NOTE(review): the response context type references the async test class — looks like
    // copy/paste; harmless since the context object is never read. Confirm before changing.
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer.
        executeBulkOperations(cosmosItemOperationsList));
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        // isGreaterThan(0) already implies non-null; the duplicate isNotNull check was dropped.
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        validateResponse(item, idToItemMap.get(item.getId()));
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
/**
 * Bulk-upserts a random number of brand-new encrypted items — each upsert acts as an
 * insert and must return 201 — and validates every decrypted response payload.
 * Also removes a stray empty statement left after the loop.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_upsertItem() {
    int totalRequest = getTotalRequest();
    Map<String, EncryptionPojo> idToItemMap = new HashMap<>();
    List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        idToItemMap.put(itemId, createPojo);
        cosmosItemOperationsList.add(CosmosBulkOperations.getUpsertItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer.
        executeBulkOperations(cosmosItemOperationsList, new CosmosBulkExecutionOptions()));
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        // Upserts of unseen ids behave as inserts, hence 201 CREATED.
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        validateResponse(item, idToItemMap.get(item.getId()));
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
/**
 * Bulk-creates up to 20 encrypted items, then bulk-deletes them and verifies every delete
 * returns 204 with the expected response metadata. Also removes a stray empty statement
 * left after the loop.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_deleteItem() {
    // Capped at 20 to keep the create-then-delete round trip fast.
    int totalRequest = Math.min(getTotalRequest(), 20);
    List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    createItemsAndVerify(cosmosItemOperations);
    List<CosmosItemOperation> deleteCosmosItemOperations = new ArrayList<>();
    for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) {
        EncryptionPojo encryptionPojo = cosmosItemOperation.getItem();
        deleteCosmosItemOperations.add(CosmosBulkOperations.getDeleteItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue()));
    }
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer
        .executeBulkOperations(deleteCosmosItemOperations));
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
/**
 * Bulk-creates encrypted items, then bulk-reads them and verifies each read returns 200
 * with a payload that decrypts back to the original POJO. Also removes a stray empty
 * statement left after the loop.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_readItem() {
    int totalRequest = getTotalRequest();
    List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
    Map<String, EncryptionPojo> idToItemMap = new HashMap<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        idToItemMap.put(itemId, createPojo);
        cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    createItemsAndVerify(cosmosItemOperations);
    List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>();
    for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) {
        EncryptionPojo encryptionPojo = cosmosItemOperation.getItem();
        readCosmosItemOperations.add(CosmosBulkOperations.getReadItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue()));
    }
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer
        .executeBulkOperations(readCosmosItemOperations));
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        validateResponse(item, idToItemMap.get(item.getId()));
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
/**
 * Executes the supplied create operations in bulk and asserts every operation succeeded:
 * 201 status, positive RU charge, response metadata present, and one distinct item id per
 * operation (no drops or duplicates). Also removes a stray empty statement after the loop.
 *
 * @param cosmosItemOperations create operations to execute; one response is expected per operation
 */
private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) {
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> createResponseFlux = Lists.newArrayList(this.cosmosEncryptionContainer.
        executeBulkOperations(cosmosItemOperations));
    // Distinct-id set proves no operation was silently duplicated or dropped.
    Set<String> distinctIndex = new HashSet<>();
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : createResponseFlux) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo encryptionPojo = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        distinctIndex.add(encryptionPojo.getId());
    }
    assertThat(processedDoc).isEqualTo(cosmosItemOperations.size());
    assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size());
}
} | class EncryptionSyncApiCrudTest extends TestSuiteBase {
// Raw sync client; built in before_CosmosItemTest and closed in afterClass.
private CosmosClient client;
// Encryption-aware wrapper around the raw client.
private CosmosEncryptionClient cosmosEncryptionClient;
// Shared encrypted container that every test in this class operates on.
private CosmosEncryptionContainer cosmosEncryptionContainer;
/**
 * Constructs the test instance with a builder supplied by the TestNG factory
 * (session-consistency client variants).
 *
 * @param clientBuilder builder injected by the data provider
 */
@Factory(dataProvider = "clientBuildersWithSessionConsistency")
public EncryptionSyncApiCrudTest(CosmosClientBuilder clientBuilder) {
    super(clientBuilder);
}
/**
 * One-time setup: builds the raw sync client, wraps it in an encryption client using the
 * shared test key-store provider, and resolves the shared encrypted container.
 */
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
    // Guards against double initialization across factory-created instances.
    assertThat(this.client).isNull();
    this.client = getClientBuilder().buildClient();
    EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider encryptionKeyStoreProvider =
        new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider();
    this.cosmosEncryptionClient = CosmosEncryptionClient.createCosmosEncryptionClient(this.client,
        encryptionKeyStoreProvider);
    this.cosmosEncryptionContainer = getSharedSyncEncryptionContainer(this.cosmosEncryptionClient);
}
/**
 * One-time teardown: closes the raw client created in {@code before_CosmosItemTest}.
 */
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    assertThat(this.client).isNotNull();
    this.client.close();
}
/**
 * Creates an encrypted item and reads it back, validating decryption, then repeats with a
 * 10,000-character sensitive string to cover large-payload encryption.
 *
 * <p>Fix: the large string was built with {@code +=} in a loop (O(n^2)); it is now built
 * with a pre-sized {@link StringBuilder}.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(),
        new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).getItem();
    validateResponse(properties, readItem);
    // Large-payload case: 10,000 'a' characters in the sensitive string.
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
/**
 * Upserts a brand-new encrypted item and reads it back, validating the decrypted
 * contents both times.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    PartitionKey partitionKey = new PartitionKey(pojo.getMypk());
    CosmosItemResponse<EncryptionPojo> upsertResponse = this.cosmosEncryptionContainer.upsertItem(pojo,
        partitionKey, new CosmosItemRequestOptions());
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo upsertedItem = upsertResponse.getItem();
    validateResponse(pojo, upsertedItem);
    EncryptionPojo readBack = this.cosmosEncryptionContainer.readItem(pojo.getId(),
        partitionKey,
        new CosmosItemRequestOptions(), EncryptionPojo.class).getItem();
    validateResponse(pojo, readBack);
}
/**
 * Creates an encrypted item, queries it back by id, and validates that query results are
 * decrypted correctly.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions,
            EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = new ArrayList<>();
    // Method reference instead of a redundant single-statement lambda.
    feedResponseIterator.iterator().forEachRemaining(feedResponse::add);
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * Queries on deterministically encrypted paths via {@code SqlQuerySpecWithEncryption} so
 * the query parameters are encrypted client-side before the query is issued.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // The query has no '%s' placeholders, so the previous String.format wrapper was redundant.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = new ArrayList<>();
    feedResponseIterator.iterator().forEachRemaining(feedResponse::add);
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * Verifies that a parameterized query targeting a path protected with randomized
 * encryption is rejected with {@link IllegalArgumentException}: equality filters are only
 * supported on deterministically encrypted paths.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions());
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // The query has no '%s' placeholders, so the previous String.format wrapper was redundant.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<EncryptionPojo> feedResponseIterator =
        this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Draining the iterator forces query execution, which must be rejected.
        List<EncryptionPojo> feedResponse = new ArrayList<>();
        feedResponseIterator.iterator().forEachRemaining(feedResponse::add);
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
/**
 * Creates three items, then drains a query one page at a time using continuation tokens,
 * asserting each page honors the requested page size and that all documents are returned.
 *
 * <p>Fix: {@code iterableByPage} previously hard-coded page size {@code 1} instead of the
 * {@code pageSize} variable that the per-page assertion checks against.
 *
 * @throws Exception declared by the original signature; not expected in practice
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions());
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions());
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions());
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedIterable<EncryptionPojo> pojoCosmosPagedIterable =
        this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            pojoCosmosPagedIterable.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            assertThat(fr.getResults().size()).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
/**
 * Verifies a transactional batch (create, replace, upsert, read, delete) against the
 * encrypted container: all five results succeed and every returned body decrypts back
 * to the expected POJO.
 *
 * <p>Fix: the {@code @Test} annotation was duplicated, which does not compile since
 * {@code @Test} is not a repeatable annotation.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    // All operations share one partition key, as required for a transactional batch.
    CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosBatch.createItemOperation(createPojo);
    cosmosBatch.replaceItemOperation(itemId, replacePojo);
    cosmosBatch.upsertItemOperation(createPojo);
    cosmosBatch.readItemOperation(itemId);
    cosmosBatch.deleteItemOperation(itemId);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch);
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    // The delete result (index 4) has no body, so only the first four items are validated.
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
/**
 * Same transactional-batch scenario as {@code batchExecution}, but exercising the
 * per-operation ({@code CosmosBatchItemRequestOptions}) and per-batch
 * ({@code CosmosBatchRequestOptions}) overloads.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String id = UUID.randomUUID().toString();
    EncryptionPojo original = getItem(id);
    EncryptionPojo replacement = getItem(id);
    replacement.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(id));
    CosmosBatchItemRequestOptions perOpOptions = new CosmosBatchItemRequestOptions();
    batch.createItemOperation(original, perOpOptions);
    batch.replaceItemOperation(id, replacement, perOpOptions);
    batch.upsertItemOperation(original, perOpOptions);
    batch.readItemOperation(id, perOpOptions);
    batch.deleteItemOperation(id, perOpOptions);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(batch, new CosmosBatchRequestOptions());
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    // Expected status per operation, in submission order.
    int[] expectedStatusCodes = new int[] {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // The delete result (index 4) has no body, so only the first four items are validated.
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), original);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacement);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), original);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), original);
}
/**
 * Patches an encrypted item with add/remove/replace/set operations across several
 * sensitive paths, then re-reads the item to confirm the patched state round-trips
 * through encryption.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void patchItem() {
    String id = UUID.randomUUID().toString();
    EncryptionPojo original = getItem(id);
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionContainer.createItem(original,
        new PartitionKey(original.getMypk()), new CosmosItemRequestOptions());
    int bumpedSensitiveInt = original.getSensitiveInt() + 1;
    EncryptionPojo replacementNestedPojo = getItem(UUID.randomUUID().toString());
    replacementNestedPojo.setSensitiveString("testing");
    CosmosPatchOperations patchOps = CosmosPatchOperations.create();
    patchOps.add("/sensitiveString", "patched");
    patchOps.remove("/sensitiveDouble");
    patchOps.replace("/sensitiveInt", bumpedSensitiveInt);
    patchOps.replace("/sensitiveNestedPojo", replacementNestedPojo);
    patchOps.set("/sensitiveBoolean", false);
    CosmosPatchItemRequestOptions patchOptions = new CosmosPatchItemRequestOptions();
    CosmosItemResponse<EncryptionPojo> patchResponse = this.cosmosEncryptionContainer.patchItem(
        original.getId(),
        new PartitionKey(original.getMypk()),
        patchOps,
        patchOptions,
        EncryptionPojo.class);
    assertThat(patchResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    EncryptionPojo patchedItem = patchResponse.getItem();
    assertThat(patchedItem).isNotNull();
    assertThat(patchedItem.getSensitiveString()).isEqualTo("patched");
    assertThat(patchedItem.getSensitiveDouble()).isNull();
    assertThat(patchedItem.getSensitiveNestedPojo()).isNotNull();
    assertThat(patchedItem.getSensitiveInt()).isEqualTo(bumpedSensitiveInt);
    assertThat(patchedItem.isSensitiveBoolean()).isEqualTo(false);
    // A fresh read must observe exactly the patched state.
    CosmosItemResponse<EncryptionPojo> readResponse = this.cosmosEncryptionContainer.readItem(
        original.getId(),
        new PartitionKey(original.getMypk()),
        patchOptions,
        EncryptionPojo.class);
    assertThat(readResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    validateResponse(patchedItem, readResponse.getItem());
}
/**
 * Picks a pseudo-random operation count in [120, 219] so the bulk tests cover varying
 * batch sizes across runs.
 *
 * @return the number of operations the calling bulk test should issue
 */
private int getTotalRequest() {
    int countRequest = new Random().nextInt(100) + 120;
    // Parameterized logging avoids eager string concatenation.
    logger.info("Total count of request for this test case: {}", countRequest);
    return countRequest;
}
/**
 * Bulk-creates a random number of encrypted items and verifies each response: 201 status,
 * positive RU charge, diagnostics/session/activity metadata present, and a payload that
 * decrypts back to the original POJO.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_createItem() {
    int totalRequest = getTotalRequest();
    Map<String, EncryptionPojo> idToItemMap = new HashMap<>();
    List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        idToItemMap.put(itemId, createPojo);
        cosmosItemOperationsList.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    // NOTE(review): the response context type references the async test class — looks like
    // copy/paste; harmless since the context object is never read. Confirm before changing.
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer.
        executeBulkOperations(cosmosItemOperationsList));
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        // isGreaterThan(0) already implies non-null; the duplicate isNotNull check was dropped.
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        validateResponse(item, idToItemMap.get(item.getId()));
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
/**
 * Bulk-upserts a random number of brand-new encrypted items — each upsert acts as an
 * insert and must return 201 — and validates every decrypted response payload.
 * Also removes a stray empty statement left after the loop.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_upsertItem() {
    int totalRequest = getTotalRequest();
    Map<String, EncryptionPojo> idToItemMap = new HashMap<>();
    List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        idToItemMap.put(itemId, createPojo);
        cosmosItemOperationsList.add(CosmosBulkOperations.getUpsertItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer.
        executeBulkOperations(cosmosItemOperationsList, new CosmosBulkExecutionOptions()));
    // Plain int suffices: the response list is iterated on a single thread.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        // Upserts of unseen ids behave as inserts, hence 201 CREATED.
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        validateResponse(item, idToItemMap.get(item.getId()));
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_deleteItem() {
    // Creates up to 20 encrypted items via the bulk API, then bulk-deletes them
    // and verifies each delete returns HTTP 204 with a positive RU charge.
    int totalRequest = Math.min(getTotalRequest(), 20);
    List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        cosmosItemOperations.add(
            CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    createItemsAndVerify(cosmosItemOperations);
    // Build the matching delete operation for every item just created.
    List<CosmosItemOperation> deleteCosmosItemOperations = new ArrayList<>();
    for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) {
        EncryptionPojo encryptionPojo = cosmosItemOperation.getItem();
        deleteCosmosItemOperations.add(
            CosmosBulkOperations.getDeleteItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue()));
    }
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(
        this.cosmosEncryptionContainer.executeBulkOperations(deleteCosmosItemOperations));
    // Responses are consumed sequentially on this thread, so a plain int suffices.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        // Fail fast on a surfaced exception BEFORE touching the item response,
        // which may be null for a failed operation.
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void bulkExecution_readItem() {
    // Creates a batch of encrypted items via the bulk API, then bulk-reads them
    // back and verifies each read returns HTTP 200 and decrypts to the original.
    int totalRequest = getTotalRequest();
    List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
    Map<String, EncryptionPojo> idToItemMap = new HashMap<>();
    for (int i = 0; i < totalRequest; i++) {
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        idToItemMap.put(itemId, createPojo);
        cosmosItemOperations.add(
            CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk())));
    }
    createItemsAndVerify(cosmosItemOperations);
    // Build the matching read operation for every item just created.
    List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>();
    for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) {
        EncryptionPojo encryptionPojo = cosmosItemOperation.getItem();
        readCosmosItemOperations.add(
            CosmosBulkOperations.getReadItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue()));
    }
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(
        this.cosmosEncryptionContainer.executeBulkOperations(readCosmosItemOperations));
    // Responses are consumed sequentially on this thread, so a plain int suffices.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) {
        processedDoc++;
        // Fail fast on a surfaced exception BEFORE touching the item response,
        // which may be null for a failed operation.
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        // Decrypted payload must match exactly what was originally written.
        EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        validateResponse(item, idToItemMap.get(item.getId()));
    }
    assertThat(processedDoc).isEqualTo(totalRequest);
}
/**
 * Executes the given operations as a bulk batch and asserts every one of them
 * succeeded as a create (HTTP 201) with a positive RU charge, and that each
 * created document has a distinct id.
 *
 * @param cosmosItemOperations create operations to execute; every operation is
 *                             expected to produce a new document
 */
private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) {
    List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> createResponseFlux = Lists.newArrayList(
        this.cosmosEncryptionContainer.executeBulkOperations(cosmosItemOperations));
    // Distinct ids prove no operation was silently duplicated or dropped.
    Set<String> distinctIndex = new HashSet<>();
    // Responses are consumed sequentially on this thread, so a plain int suffices.
    int processedDoc = 0;
    for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : createResponseFlux) {
        processedDoc++;
        // Fail fast on a surfaced exception BEFORE touching the item response,
        // which may be null for a failed operation.
        if (cosmosBulkOperationResponse.getException() != null) {
            logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
            fail(cosmosBulkOperationResponse.getException().toString());
        }
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        EncryptionPojo encryptionPojo = cosmosBulkItemResponse.getItem(EncryptionPojo.class);
        distinctIndex.add(encryptionPojo.getId());
    }
    assertThat(processedDoc).isEqualTo(cosmosItemOperations.size());
    assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size());
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.