comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
given the same test fails on CI with different actual values: > expected: <10000> but was: <3442> expected: <10000> but was: <7552> expected: <10000> but was: <2170> I wonder this is due to JUnit running test cases in parallel? where both tests use the shared static map (one tests clears while another one is inserting)
public void parseUniqueURLs() { for (int i = 0; i < 10000; i++) { UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=" + i); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=" + i, urlBuilder.toString()); } assertEquals(10000, UrlBuilder.getParsedUrls().size()); UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=10001"); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=10001", urlBuilder.toString()); assertEquals(1, UrlBuilder.getParsedUrls().size()); urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=10001"); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=10001", urlBuilder.toString()); assertEquals(1, UrlBuilder.getParsedUrls().size()); }
assertEquals(10000, UrlBuilder.getParsedUrls().size());
public void parseUniqueURLs() { IntStream.range(0, 100000) .parallel() .forEach(i -> { UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=" + i); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=" + i, urlBuilder.toString()); }); assertTrue(UrlBuilder.getParsedUrls().size() <= 10000); }
class UrlBuilderTests { @Test public void scheme() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); assertEquals("http: } @Test public void schemeWhenSchemeIsNull() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(null); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(""); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme("https"); assertEquals("https", builder.getScheme()); } @Test public void schemeWhenSchemeContainsTerminator() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertNull(builder.getHost()); assertEquals("http: } @Test public void schemeWhenSchemeContainsHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("http: } @Test public void schemeAndHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com"); assertEquals("http: } @Test public void schemeAndHostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.exa mple.com"); assertEquals("http: } @Test public void host() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostIsNull() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(null); assertNull(builder.getHost()); } @Test public void hostWhenHostIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(""); assertNull(builder.getHost()); } @Test public void hostWhenHostIsNotEmpty() { final UrlBuilder builder = new 
UrlBuilder() .setHost("www.example.com"); builder.setHost("www.bing.com"); assertEquals("www.bing.com", builder.getHost()); } @Test public void hostWhenHostContainsSchemeTerminator() { final UrlBuilder builder = new UrlBuilder() .setHost(": assertNull(builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsScheme() { final UrlBuilder builder = new UrlBuilder() .setHost("https: assertEquals("https", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("https: } @Test public void hostWhenHostContainsColonButNoPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:"); assertEquals("www.example.com", builder.getHost()); assertNull(builder.getPort()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:1234"); assertEquals("www.example.com", builder.getHost()); assertEquals(1234, builder.getPort()); assertEquals("www.example.com:1234", builder.toString()); } @Test public void hostWhenHostContainsForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/"); assertEquals("www.example.com", builder.getHost()); assertEquals("/", builder.getPath()); assertEquals("www.example.com/", builder.toString()); } @Test public void hostWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("/index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void hostWhenHostContainsQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?"); assertEquals("www.example.com", builder.getHost()); assertEquals(0, builder.getQuery().size()); 
assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?a=b"); assertEquals("www.example.com", builder.getHost()); assertThat(builder.toString(), CoreMatchers.containsString("a=b")); assertEquals("www.example.com?a=b", builder.toString()); } @Test public void hostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setHost("www.exampl e.com"); assertEquals("www.exampl e.com", builder.toString()); } @Test public void hostAndPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHost() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHostAndBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithWhitespaceInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my path"); assertEquals("www.example.com/my path", builder.toString()); } @Test public void hostAndPathWithPlusInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my+path"); assertEquals("www.example.com/my+path", builder.toString()); } @Test public void hostAndPathWithPercent20InPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my%20path"); assertEquals("www.example.com/my%20path", builder.toString()); } 
@Test public void portInt() { final UrlBuilder builder = new UrlBuilder() .setPort(50); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithNull() { final UrlBuilder builder = new UrlBuilder() .setPort(null); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portStringWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(""); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portString() { final UrlBuilder builder = new UrlBuilder() .setPort("50"); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/"); assertEquals(50, builder.getPort()); assertEquals("/", builder.getPath()); assertEquals(":50/", builder.toString()); } @Test public void portStringPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/index.html"); assertEquals(50, builder.getPort()); assertEquals("/index.html", builder.getPath()); assertEquals(":50/index.html", builder.toString()); } @Test public void portStringWithQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?"); assertEquals(50, builder.getPort()); assertEquals(0, builder.getQuery().size()); assertEquals(":50", builder.toString()); } @Test public void portStringQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?a=b&c=d"); assertEquals(50, builder.getPort()); assertThat(builder.toString(), CoreMatchers.containsString("?a=b&c=d")); assertEquals(":50?a=b&c=d", builder.toString()); } @Test public void portStringWhenPortIsNull() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(null); assertNull(builder.getPort()); } @Test public void portStringWhenPortIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(""); 
assertNull(builder.getPort()); } @Test public void portStringWhenPortIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort("123"); assertEquals(123, builder.getPort()); } @Test public void schemeAndHostAndOneQueryParameter() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App%20les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go od"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go%20od"); assertEquals("http: } @Test public void schemeAndHostAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D"); assertEquals("http: } @Test public void schemeAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D") .setPath("index.html"); assertEquals("http: } @Test public void pathWhenBuilderPathIsNullAndPathIsNull() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(null); 
assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPath() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsNull() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(null); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPath() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/site") .setPath("index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void pathFirstWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("index.html") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void 
emptyPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void slashPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath(" .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void withAbsolutePath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: assertEquals("http: } @Test public void queryInPath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("mypath?thing=stuff") .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void withAbsolutePathAndQuery() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void queryWithNull() { final UrlBuilder builder = new UrlBuilder() .setQuery(null); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setQuery(""); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithQuestionMark() { final UrlBuilder builder = new UrlBuilder() .setQuery("?"); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void parseWithNullString() { final UrlBuilder builder = UrlBuilder.parse((String) null); assertEquals("", builder.toString()); } @Test public void parseWithEmpty() { final UrlBuilder builder = UrlBuilder.parse(""); assertEquals("", 
builder.toString()); } @Test public void parseHost() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com"); assertEquals("www.bing.com", builder.toString()); } @Test public void parseWithProtocolAndHost() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:8080"); assertEquals("www.bing.com:8080", builder.toString()); } @Test public void parseWithProtocolAndHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/my/path"); assertEquals("www.bing.com/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:1234/my/path"); assertEquals("www.bing.com:1234/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1"); assertEquals("www.bing.com?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1"); assertEquals("www.bing.com:123?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1"); 
assertEquals("www.bing.com/folder/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1"); assertEquals("www.bing.com:123/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1&b=2"); assertEquals("www.bing.com?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1&b=2"); assertEquals("www.bing.com:123?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1&b=2"); assertEquals("www.bing.com/folder/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1&b=2"); assertEquals("www.bing.com:123/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = 
UrlBuilder.parse("https: assertEquals("https: } @Test public void parseWithColonInPath() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseURLWithNull() { final UrlBuilder builder = UrlBuilder.parse((URL) null); assertEquals("", builder.toString()); } @Test public void parseURLSchemeAndHost() throws MalformedURLException { final UrlBuilder builder = UrlBuilder.parse(new URL("http: assertEquals("http: } @Test public void parallelParsing() throws InterruptedException { Thread.UncaughtExceptionHandler handler = mock(Thread.UncaughtExceptionHandler.class); ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors(), ForkJoinPool.defaultForkJoinWorkerThreadFactory, handler, false); AtomicInteger callCount = new AtomicInteger(); List<Callable<UrlBuilder>> tasks = IntStream.range(0, 100000) .mapToObj(i -> (Callable<UrlBuilder>) () -> { callCount.incrementAndGet(); return UrlBuilder.parse("https: }) .collect(Collectors.toList()); pool.invokeAll(tasks); pool.shutdown(); assertTrue(pool.awaitTermination(10, TimeUnit.SECONDS)); assertEquals(100000, callCount.get()); } @Test public void fluxParallelParsing() { Mono<Long> mono = Flux.range(0, 100000) .parallel() .map(i -> UrlBuilder.parse("https: .sequential() .count(); StepVerifier.create(mono) .assertNext(count -> assertEquals(100000, count)) .verifyComplete(); } @Test @Test public void parseUniqueUrlsInParallel() { IntStream.range(0, 100000) .parallel() .forEach(i -> { UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=" + i); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=" + i, urlBuilder.toString()); }); } }
class UrlBuilderTests { @Test public void scheme() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); assertEquals("http: } @Test public void schemeWhenSchemeIsNull() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(null); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(""); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme("https"); assertEquals("https", builder.getScheme()); } @Test public void schemeWhenSchemeContainsTerminator() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertNull(builder.getHost()); assertEquals("http: } @Test public void schemeWhenSchemeContainsHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("http: } @Test public void schemeAndHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com"); assertEquals("http: } @Test public void schemeAndHostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.exa mple.com"); assertEquals("http: } @Test public void host() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostIsNull() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(null); assertNull(builder.getHost()); } @Test public void hostWhenHostIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(""); assertNull(builder.getHost()); } @Test public void hostWhenHostIsNotEmpty() { final UrlBuilder builder = new 
UrlBuilder() .setHost("www.example.com"); builder.setHost("www.bing.com"); assertEquals("www.bing.com", builder.getHost()); } @Test public void hostWhenHostContainsSchemeTerminator() { final UrlBuilder builder = new UrlBuilder() .setHost(": assertNull(builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsScheme() { final UrlBuilder builder = new UrlBuilder() .setHost("https: assertEquals("https", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("https: } @Test public void hostWhenHostContainsColonButNoPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:"); assertEquals("www.example.com", builder.getHost()); assertNull(builder.getPort()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:1234"); assertEquals("www.example.com", builder.getHost()); assertEquals(1234, builder.getPort()); assertEquals("www.example.com:1234", builder.toString()); } @Test public void hostWhenHostContainsForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/"); assertEquals("www.example.com", builder.getHost()); assertEquals("/", builder.getPath()); assertEquals("www.example.com/", builder.toString()); } @Test public void hostWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("/index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void hostWhenHostContainsQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?"); assertEquals("www.example.com", builder.getHost()); assertEquals(0, builder.getQuery().size()); 
assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?a=b"); assertEquals("www.example.com", builder.getHost()); assertThat(builder.toString(), CoreMatchers.containsString("a=b")); assertEquals("www.example.com?a=b", builder.toString()); } @Test public void hostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setHost("www.exampl e.com"); assertEquals("www.exampl e.com", builder.toString()); } @Test public void hostAndPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHost() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHostAndBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithWhitespaceInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my path"); assertEquals("www.example.com/my path", builder.toString()); } @Test public void hostAndPathWithPlusInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my+path"); assertEquals("www.example.com/my+path", builder.toString()); } @Test public void hostAndPathWithPercent20InPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my%20path"); assertEquals("www.example.com/my%20path", builder.toString()); } 
@Test public void portInt() { final UrlBuilder builder = new UrlBuilder() .setPort(50); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithNull() { final UrlBuilder builder = new UrlBuilder() .setPort(null); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portStringWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(""); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portString() { final UrlBuilder builder = new UrlBuilder() .setPort("50"); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/"); assertEquals(50, builder.getPort()); assertEquals("/", builder.getPath()); assertEquals(":50/", builder.toString()); } @Test public void portStringPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/index.html"); assertEquals(50, builder.getPort()); assertEquals("/index.html", builder.getPath()); assertEquals(":50/index.html", builder.toString()); } @Test public void portStringWithQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?"); assertEquals(50, builder.getPort()); assertEquals(0, builder.getQuery().size()); assertEquals(":50", builder.toString()); } @Test public void portStringQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?a=b&c=d"); assertEquals(50, builder.getPort()); assertThat(builder.toString(), CoreMatchers.containsString("?a=b&c=d")); assertEquals(":50?a=b&c=d", builder.toString()); } @Test public void portStringWhenPortIsNull() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(null); assertNull(builder.getPort()); } @Test public void portStringWhenPortIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(""); 
assertNull(builder.getPort()); } @Test public void portStringWhenPortIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort("123"); assertEquals(123, builder.getPort()); } @Test public void schemeAndHostAndOneQueryParameter() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App%20les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go od"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go%20od"); assertEquals("http: } @Test public void schemeAndHostAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D"); assertEquals("http: } @Test public void schemeAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D") .setPath("index.html"); assertEquals("http: } @Test public void pathWhenBuilderPathIsNullAndPathIsNull() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(null); 
assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPath() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsNull() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(null); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPath() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/site") .setPath("index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void pathFirstWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("index.html") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void 
emptyPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void slashPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath(" .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void withAbsolutePath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: assertEquals("http: } @Test public void queryInPath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("mypath?thing=stuff") .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void withAbsolutePathAndQuery() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void queryWithNull() { final UrlBuilder builder = new UrlBuilder() .setQuery(null); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setQuery(""); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithQuestionMark() { final UrlBuilder builder = new UrlBuilder() .setQuery("?"); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void parseWithNullString() { final UrlBuilder builder = UrlBuilder.parse((String) null); assertEquals("", builder.toString()); } @Test public void parseWithEmpty() { final UrlBuilder builder = UrlBuilder.parse(""); assertEquals("", 
builder.toString()); } @Test public void parseHost() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com"); assertEquals("www.bing.com", builder.toString()); } @Test public void parseWithProtocolAndHost() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:8080"); assertEquals("www.bing.com:8080", builder.toString()); } @Test public void parseWithProtocolAndHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/my/path"); assertEquals("www.bing.com/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:1234/my/path"); assertEquals("www.bing.com:1234/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1"); assertEquals("www.bing.com?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1"); assertEquals("www.bing.com:123?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1"); 
assertEquals("www.bing.com/folder/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1"); assertEquals("www.bing.com:123/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1&b=2"); assertEquals("www.bing.com?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1&b=2"); assertEquals("www.bing.com:123?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1&b=2"); assertEquals("www.bing.com/folder/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1&b=2"); assertEquals("www.bing.com:123/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = 
UrlBuilder.parse("https: assertEquals("https: } @Test public void parseWithColonInPath() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseURLWithNull() { final UrlBuilder builder = UrlBuilder.parse((URL) null); assertEquals("", builder.toString()); } @Test public void parseURLSchemeAndHost() throws MalformedURLException { final UrlBuilder builder = UrlBuilder.parse(new URL("http: assertEquals("http: } @Test public void parallelParsing() throws InterruptedException { Thread.UncaughtExceptionHandler handler = mock(Thread.UncaughtExceptionHandler.class); ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors(), ForkJoinPool.defaultForkJoinWorkerThreadFactory, handler, false); AtomicInteger callCount = new AtomicInteger(); List<Callable<UrlBuilder>> tasks = IntStream.range(0, 100000) .mapToObj(i -> (Callable<UrlBuilder>) () -> { callCount.incrementAndGet(); return UrlBuilder.parse("https: }) .collect(Collectors.toList()); pool.invokeAll(tasks); pool.shutdown(); assertTrue(pool.awaitTermination(10, TimeUnit.SECONDS)); assertEquals(100000, callCount.get()); } @Test public void fluxParallelParsing() { Mono<Long> mono = Flux.range(0, 100000) .parallel() .map(i -> UrlBuilder.parse("https: .sequential() .count(); StepVerifier.create(mono) .assertNext(count -> assertEquals(100000, count)) .verifyComplete(); } @Test }
We could try merging these two tests (parseUniqueURLs and parseUniqueUrlsInParallel) into one, so that there are no tests executing in parallel against the shared static map?
public void parseUniqueURLs() { for (int i = 0; i < 10000; i++) { UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=" + i); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=" + i, urlBuilder.toString()); } assertEquals(10000, UrlBuilder.getParsedUrls().size()); UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=10001"); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=10001", urlBuilder.toString()); assertEquals(1, UrlBuilder.getParsedUrls().size()); urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=10001"); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=10001", urlBuilder.toString()); assertEquals(1, UrlBuilder.getParsedUrls().size()); }
assertEquals(10000, UrlBuilder.getParsedUrls().size());
public void parseUniqueURLs() { IntStream.range(0, 100000) .parallel() .forEach(i -> { UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=" + i); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=" + i, urlBuilder.toString()); }); assertTrue(UrlBuilder.getParsedUrls().size() <= 10000); }
class UrlBuilderTests { @Test public void scheme() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); assertEquals("http: } @Test public void schemeWhenSchemeIsNull() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(null); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(""); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme("https"); assertEquals("https", builder.getScheme()); } @Test public void schemeWhenSchemeContainsTerminator() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertNull(builder.getHost()); assertEquals("http: } @Test public void schemeWhenSchemeContainsHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("http: } @Test public void schemeAndHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com"); assertEquals("http: } @Test public void schemeAndHostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.exa mple.com"); assertEquals("http: } @Test public void host() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostIsNull() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(null); assertNull(builder.getHost()); } @Test public void hostWhenHostIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(""); assertNull(builder.getHost()); } @Test public void hostWhenHostIsNotEmpty() { final UrlBuilder builder = new 
UrlBuilder() .setHost("www.example.com"); builder.setHost("www.bing.com"); assertEquals("www.bing.com", builder.getHost()); } @Test public void hostWhenHostContainsSchemeTerminator() { final UrlBuilder builder = new UrlBuilder() .setHost(": assertNull(builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsScheme() { final UrlBuilder builder = new UrlBuilder() .setHost("https: assertEquals("https", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("https: } @Test public void hostWhenHostContainsColonButNoPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:"); assertEquals("www.example.com", builder.getHost()); assertNull(builder.getPort()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:1234"); assertEquals("www.example.com", builder.getHost()); assertEquals(1234, builder.getPort()); assertEquals("www.example.com:1234", builder.toString()); } @Test public void hostWhenHostContainsForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/"); assertEquals("www.example.com", builder.getHost()); assertEquals("/", builder.getPath()); assertEquals("www.example.com/", builder.toString()); } @Test public void hostWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("/index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void hostWhenHostContainsQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?"); assertEquals("www.example.com", builder.getHost()); assertEquals(0, builder.getQuery().size()); 
assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?a=b"); assertEquals("www.example.com", builder.getHost()); assertThat(builder.toString(), CoreMatchers.containsString("a=b")); assertEquals("www.example.com?a=b", builder.toString()); } @Test public void hostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setHost("www.exampl e.com"); assertEquals("www.exampl e.com", builder.toString()); } @Test public void hostAndPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHost() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHostAndBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithWhitespaceInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my path"); assertEquals("www.example.com/my path", builder.toString()); } @Test public void hostAndPathWithPlusInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my+path"); assertEquals("www.example.com/my+path", builder.toString()); } @Test public void hostAndPathWithPercent20InPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my%20path"); assertEquals("www.example.com/my%20path", builder.toString()); } 
@Test public void portInt() { final UrlBuilder builder = new UrlBuilder() .setPort(50); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithNull() { final UrlBuilder builder = new UrlBuilder() .setPort(null); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portStringWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(""); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portString() { final UrlBuilder builder = new UrlBuilder() .setPort("50"); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/"); assertEquals(50, builder.getPort()); assertEquals("/", builder.getPath()); assertEquals(":50/", builder.toString()); } @Test public void portStringPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/index.html"); assertEquals(50, builder.getPort()); assertEquals("/index.html", builder.getPath()); assertEquals(":50/index.html", builder.toString()); } @Test public void portStringWithQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?"); assertEquals(50, builder.getPort()); assertEquals(0, builder.getQuery().size()); assertEquals(":50", builder.toString()); } @Test public void portStringQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?a=b&c=d"); assertEquals(50, builder.getPort()); assertThat(builder.toString(), CoreMatchers.containsString("?a=b&c=d")); assertEquals(":50?a=b&c=d", builder.toString()); } @Test public void portStringWhenPortIsNull() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(null); assertNull(builder.getPort()); } @Test public void portStringWhenPortIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(""); 
assertNull(builder.getPort()); } @Test public void portStringWhenPortIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort("123"); assertEquals(123, builder.getPort()); } @Test public void schemeAndHostAndOneQueryParameter() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App%20les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go od"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go%20od"); assertEquals("http: } @Test public void schemeAndHostAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D"); assertEquals("http: } @Test public void schemeAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D") .setPath("index.html"); assertEquals("http: } @Test public void pathWhenBuilderPathIsNullAndPathIsNull() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(null); 
assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPath() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsNull() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(null); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPath() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/site") .setPath("index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void pathFirstWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("index.html") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void 
emptyPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void slashPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath(" .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void withAbsolutePath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: assertEquals("http: } @Test public void queryInPath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("mypath?thing=stuff") .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void withAbsolutePathAndQuery() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void queryWithNull() { final UrlBuilder builder = new UrlBuilder() .setQuery(null); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setQuery(""); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithQuestionMark() { final UrlBuilder builder = new UrlBuilder() .setQuery("?"); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void parseWithNullString() { final UrlBuilder builder = UrlBuilder.parse((String) null); assertEquals("", builder.toString()); } @Test public void parseWithEmpty() { final UrlBuilder builder = UrlBuilder.parse(""); assertEquals("", 
builder.toString()); } @Test public void parseHost() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com"); assertEquals("www.bing.com", builder.toString()); } @Test public void parseWithProtocolAndHost() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:8080"); assertEquals("www.bing.com:8080", builder.toString()); } @Test public void parseWithProtocolAndHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/my/path"); assertEquals("www.bing.com/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:1234/my/path"); assertEquals("www.bing.com:1234/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1"); assertEquals("www.bing.com?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1"); assertEquals("www.bing.com:123?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1"); 
assertEquals("www.bing.com/folder/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1"); assertEquals("www.bing.com:123/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1&b=2"); assertEquals("www.bing.com?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1&b=2"); assertEquals("www.bing.com:123?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1&b=2"); assertEquals("www.bing.com/folder/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1&b=2"); assertEquals("www.bing.com:123/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = 
UrlBuilder.parse("https: assertEquals("https: } @Test public void parseWithColonInPath() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseURLWithNull() { final UrlBuilder builder = UrlBuilder.parse((URL) null); assertEquals("", builder.toString()); } @Test public void parseURLSchemeAndHost() throws MalformedURLException { final UrlBuilder builder = UrlBuilder.parse(new URL("http: assertEquals("http: } @Test public void parallelParsing() throws InterruptedException { Thread.UncaughtExceptionHandler handler = mock(Thread.UncaughtExceptionHandler.class); ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors(), ForkJoinPool.defaultForkJoinWorkerThreadFactory, handler, false); AtomicInteger callCount = new AtomicInteger(); List<Callable<UrlBuilder>> tasks = IntStream.range(0, 100000) .mapToObj(i -> (Callable<UrlBuilder>) () -> { callCount.incrementAndGet(); return UrlBuilder.parse("https: }) .collect(Collectors.toList()); pool.invokeAll(tasks); pool.shutdown(); assertTrue(pool.awaitTermination(10, TimeUnit.SECONDS)); assertEquals(100000, callCount.get()); } @Test public void fluxParallelParsing() { Mono<Long> mono = Flux.range(0, 100000) .parallel() .map(i -> UrlBuilder.parse("https: .sequential() .count(); StepVerifier.create(mono) .assertNext(count -> assertEquals(100000, count)) .verifyComplete(); } @Test @Test public void parseUniqueUrlsInParallel() { IntStream.range(0, 100000) .parallel() .forEach(i -> { UrlBuilder urlBuilder = UrlBuilder.parse("www.bing.com:123/index.html?a=" + i); assertNotNull(urlBuilder); assertEquals("www.bing.com:123/index.html?a=" + i, urlBuilder.toString()); }); } }
class UrlBuilderTests { @Test public void scheme() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); assertEquals("http: } @Test public void schemeWhenSchemeIsNull() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(null); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme(""); assertNull(builder.getScheme()); } @Test public void schemeWhenSchemeIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setScheme("http"); builder.setScheme("https"); assertEquals("https", builder.getScheme()); } @Test public void schemeWhenSchemeContainsTerminator() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertNull(builder.getHost()); assertEquals("http: } @Test public void schemeWhenSchemeContainsHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http: assertEquals("http", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("http: } @Test public void schemeAndHost() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com"); assertEquals("http: } @Test public void schemeAndHostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.exa mple.com"); assertEquals("http: } @Test public void host() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostIsNull() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(null); assertNull(builder.getHost()); } @Test public void hostWhenHostIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com"); builder.setHost(""); assertNull(builder.getHost()); } @Test public void hostWhenHostIsNotEmpty() { final UrlBuilder builder = new 
UrlBuilder() .setHost("www.example.com"); builder.setHost("www.bing.com"); assertEquals("www.bing.com", builder.getHost()); } @Test public void hostWhenHostContainsSchemeTerminator() { final UrlBuilder builder = new UrlBuilder() .setHost(": assertNull(builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsScheme() { final UrlBuilder builder = new UrlBuilder() .setHost("https: assertEquals("https", builder.getScheme()); assertEquals("www.example.com", builder.getHost()); assertEquals("https: } @Test public void hostWhenHostContainsColonButNoPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:"); assertEquals("www.example.com", builder.getHost()); assertNull(builder.getPort()); assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsPort() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com:1234"); assertEquals("www.example.com", builder.getHost()); assertEquals(1234, builder.getPort()); assertEquals("www.example.com:1234", builder.toString()); } @Test public void hostWhenHostContainsForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/"); assertEquals("www.example.com", builder.getHost()); assertEquals("/", builder.getPath()); assertEquals("www.example.com/", builder.toString()); } @Test public void hostWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("/index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void hostWhenHostContainsQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?"); assertEquals("www.example.com", builder.getHost()); assertEquals(0, builder.getQuery().size()); 
assertEquals("www.example.com", builder.toString()); } @Test public void hostWhenHostContainsQuery() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com?a=b"); assertEquals("www.example.com", builder.getHost()); assertThat(builder.toString(), CoreMatchers.containsString("a=b")); assertEquals("www.example.com?a=b", builder.toString()); } @Test public void hostWhenHostHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setHost("www.exampl e.com"); assertEquals("www.exampl e.com", builder.toString()); } @Test public void hostAndPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHost() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithSlashAfterHostAndBeforePath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/") .setPath("/my/path"); assertEquals("www.example.com/my/path", builder.toString()); } @Test public void hostAndPathWithWhitespaceInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my path"); assertEquals("www.example.com/my path", builder.toString()); } @Test public void hostAndPathWithPlusInPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my+path"); assertEquals("www.example.com/my+path", builder.toString()); } @Test public void hostAndPathWithPercent20InPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com") .setPath("my%20path"); assertEquals("www.example.com/my%20path", builder.toString()); } 
@Test public void portInt() { final UrlBuilder builder = new UrlBuilder() .setPort(50); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithNull() { final UrlBuilder builder = new UrlBuilder() .setPort(null); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portStringWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(""); assertNull(builder.getPort()); assertEquals("", builder.toString()); } @Test public void portString() { final UrlBuilder builder = new UrlBuilder() .setPort("50"); assertEquals(50, builder.getPort()); assertEquals(":50", builder.toString()); } @Test public void portStringWithForwardSlashButNoPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/"); assertEquals(50, builder.getPort()); assertEquals("/", builder.getPath()); assertEquals(":50/", builder.toString()); } @Test public void portStringPath() { final UrlBuilder builder = new UrlBuilder() .setPort("50/index.html"); assertEquals(50, builder.getPort()); assertEquals("/index.html", builder.getPath()); assertEquals(":50/index.html", builder.toString()); } @Test public void portStringWithQuestionMarkButNoQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?"); assertEquals(50, builder.getPort()); assertEquals(0, builder.getQuery().size()); assertEquals(":50", builder.toString()); } @Test public void portStringQuery() { final UrlBuilder builder = new UrlBuilder() .setPort("50?a=b&c=d"); assertEquals(50, builder.getPort()); assertThat(builder.toString(), CoreMatchers.containsString("?a=b&c=d")); assertEquals(":50?a=b&c=d", builder.toString()); } @Test public void portStringWhenPortIsNull() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(null); assertNull(builder.getPort()); } @Test public void portStringWhenPortIsEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort(""); 
assertNull(builder.getPort()); } @Test public void portStringWhenPortIsNotEmpty() { final UrlBuilder builder = new UrlBuilder() .setPort(8080); builder.setPort("123"); assertEquals(123, builder.getPort()); } @Test public void schemeAndHostAndOneQueryParameter() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterNameHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("App%20les", "B"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasWhitespace() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go od"); assertEquals("http: } @Test public void schemeAndHostAndOneQueryParameterWhenQueryParameterValueHasPercent20() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("Apples", "Go%20od"); assertEquals("http: } @Test public void schemeAndHostAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D"); assertEquals("http: } @Test public void schemeAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setQueryParameter("A", "B") .setQueryParameter("C", "D") .setPath("index.html"); assertEquals("http: } @Test public void pathWhenBuilderPathIsNullAndPathIsNull() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(null); 
assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder(); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsNullAndPath() { final UrlBuilder builder = new UrlBuilder(); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsNull() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(null); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsEmptyString() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath(""); assertNull(builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPathIsForwardSlash() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("/"); assertEquals("/", builder.getPath()); } @Test public void pathWhenBuilderPathIsForwardSlashAndPath() { final UrlBuilder builder = new UrlBuilder() .setPath("/"); builder.setPath("test/path.html"); assertEquals("test/path.html", builder.getPath()); } @Test public void pathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setHost("www.example.com/site") .setPath("index.html"); assertEquals("www.example.com", builder.getHost()); assertEquals("index.html", builder.getPath()); assertEquals("www.example.com/index.html", builder.toString()); } @Test public void pathFirstWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("index.html") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void 
emptyPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath("") .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void slashPathWhenHostContainsPath() { final UrlBuilder builder = new UrlBuilder() .setPath(" .setHost("www.example.com/site"); assertEquals("www.example.com", builder.getHost()); assertEquals("/site", builder.getPath()); assertEquals("www.example.com/site", builder.toString()); } @Test public void withAbsolutePath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: assertEquals("http: } @Test public void queryInPath() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("mypath?thing=stuff") .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void withAbsolutePathAndQuery() { final UrlBuilder builder = new UrlBuilder() .setScheme("http") .setHost("www.example.com") .setPath("http: .setQueryParameter("otherthing", "otherstuff"); assertEquals("http: } @Test public void queryWithNull() { final UrlBuilder builder = new UrlBuilder() .setQuery(null); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithEmpty() { final UrlBuilder builder = new UrlBuilder() .setQuery(""); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void queryWithQuestionMark() { final UrlBuilder builder = new UrlBuilder() .setQuery("?"); assertEquals(0, builder.getQuery().size()); assertEquals("", builder.toString()); } @Test public void parseWithNullString() { final UrlBuilder builder = UrlBuilder.parse((String) null); assertEquals("", builder.toString()); } @Test public void parseWithEmpty() { final UrlBuilder builder = UrlBuilder.parse(""); assertEquals("", 
builder.toString()); } @Test public void parseHost() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com"); assertEquals("www.bing.com", builder.toString()); } @Test public void parseWithProtocolAndHost() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:8080"); assertEquals("www.bing.com:8080", builder.toString()); } @Test public void parseWithProtocolAndHostAndPort() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/my/path"); assertEquals("www.bing.com/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:1234/my/path"); assertEquals("www.bing.com:1234/my/path", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPath() { final UrlBuilder builder = UrlBuilder.parse("ftp: assertEquals("ftp: } @Test public void parseHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1"); assertEquals("www.bing.com?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1"); assertEquals("www.bing.com:123?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1"); 
assertEquals("www.bing.com/folder/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1"); assertEquals("www.bing.com:123/index.html?a=1", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndOneQueryParameter() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com?a=1&b=2"); assertEquals("www.bing.com?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123?a=1&b=2"); assertEquals("www.bing.com:123?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com/folder/index.html?a=1&b=2"); assertEquals("www.bing.com/folder/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = UrlBuilder.parse("www.bing.com:123/index.html?a=1&b=2"); assertEquals("www.bing.com:123/index.html?a=1&b=2", builder.toString()); } @Test public void parseWithProtocolAndHostAndPortAndPathAndTwoQueryParameters() { final UrlBuilder builder = 
UrlBuilder.parse("https: assertEquals("https: } @Test public void parseWithColonInPath() { final UrlBuilder builder = UrlBuilder.parse("https: assertEquals("https: } @Test public void parseURLWithNull() { final UrlBuilder builder = UrlBuilder.parse((URL) null); assertEquals("", builder.toString()); } @Test public void parseURLSchemeAndHost() throws MalformedURLException { final UrlBuilder builder = UrlBuilder.parse(new URL("http: assertEquals("http: } @Test public void parallelParsing() throws InterruptedException { Thread.UncaughtExceptionHandler handler = mock(Thread.UncaughtExceptionHandler.class); ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors(), ForkJoinPool.defaultForkJoinWorkerThreadFactory, handler, false); AtomicInteger callCount = new AtomicInteger(); List<Callable<UrlBuilder>> tasks = IntStream.range(0, 100000) .mapToObj(i -> (Callable<UrlBuilder>) () -> { callCount.incrementAndGet(); return UrlBuilder.parse("https: }) .collect(Collectors.toList()); pool.invokeAll(tasks); pool.shutdown(); assertTrue(pool.awaitTermination(10, TimeUnit.SECONDS)); assertEquals(100000, callCount.get()); } @Test public void fluxParallelParsing() { Mono<Long> mono = Flux.range(0, 100000) .parallel() .map(i -> UrlBuilder.parse("https: .sequential() .count(); StepVerifier.create(mono) .assertNext(count -> assertEquals(100000, count)) .verifyComplete(); } @Test }
Add tests
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
final String lastContinuationToken = lease.getContinuationToken();
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
@j82w We have tests that ensure CFP properly recovers while split occurs. However because the nature of how CFP works, receiving duplicates is expected and trying to time exactly when to look for the state of the current continuation tokens in the leases (which btw are internal and not directly accessible), it will potentially add flakiness in our CI. I will continue to explore how to add a potential test but given the urgency of this fix, this is not a priority at the moment.
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
final String lastContinuationToken = lease.getContinuationToken();
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
We have similar test scenarios for partition split in the rest of the SDK components, the following can be used as example for adding tests in CFP for partition split: 1. mocking unit test: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/query/DocumentProducerTest.java#L110 2. end to end test: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/ReadMyWritesConsistencyTest.java
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
final String lastContinuationToken = lease.getContinuationToken();
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
The main issue is not about not having a split test, we already have that. The bug is about the content of the resulting leases after the split, which is information that is actively updated and not directly exposed to the test as a set of public APIs. We can use the "BridgeInternals" to directly access the internal CFP classes and call in directly into the methods we want to unit test. Alternatively and the approach we expect to mirror customer scenarios is to use the public APIs provided and the CFP to monitor itself, in this case the lease collection. Any updates to that collection can be monitored and validated real time. The only complication with this approach is that timing for the various checks can be thrown off depending on connectivity, thread scheduling and other conditions. In the current test we introduce enough of a lag between the various checks to account for any such situations.
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
final String lastContinuationToken = lease.getContinuationToken();
public Flux<Lease> splitPartition(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } final String leaseToken = lease.getLeaseToken(); final String lastContinuationToken = lease.getContinuationToken(); logger.info("Partition {} is gone due to split; will attempt to resume using continuation token {}.", leaseToken, lastContinuationToken); return this.enumPartitionKeyRanges() .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) .map(PartitionKeyRange::getId) .collectList() .flatMapMany(addedLeaseTokens -> { if (addedLeaseTokens.size() == 0) { logger.error("Partition {} had split but we failed to find at least one child partition", leaseToken); throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); } return Flux.fromIterable(addedLeaseTokens); }) .flatMap(addedRangeId -> { return this.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); }, this.degreeOfParallelism) .map(newLease -> { logger.info("Partition {} split into new partition with lease token {} and continuation token {}.", leaseToken, newLease.getLeaseToken(), lastContinuationToken); return newLease; }); }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
class PartitionSynchronizerImpl implements PartitionSynchronizer { private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); private final ChangeFeedContextClient documentClient; private final CosmosAsyncContainer collectionSelfLink; private final LeaseContainer leaseContainer; private final LeaseManager leaseManager; private final int degreeOfParallelism; private final int maxBatchSize; private final String collectionResourceId; public PartitionSynchronizerImpl( ChangeFeedContextClient documentClient, CosmosAsyncContainer collectionSelfLink, LeaseContainer leaseContainer, LeaseManager leaseManager, int degreeOfParallelism, int maxBatchSize, String collectionResourceId) { this.documentClient = documentClient; this.collectionSelfLink = collectionSelfLink; this.leaseContainer = leaseContainer; this.leaseManager = leaseManager; this.degreeOfParallelism = degreeOfParallelism; this.maxBatchSize = maxBatchSize; this.collectionResourceId = collectionResourceId; } @Override public Mono<Void> createMissingLeases() { return this.enumPartitionKeyRanges() .map(Resource::getId) .collectList() .flatMap( partitionKeyRangeIds -> { Set<String> leaseTokens = new HashSet<>(partitionKeyRangeIds); return this.createLeases(leaseTokens).then(); }) .onErrorResume( throwable -> { return Mono.empty(); }); } @Override private Flux<PartitionKeyRange> enumPartitionKeyRanges() { String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); ModelBridgeInternal.setQueryRequestOptionsContinuationTokenAndMaxItemCount(cosmosQueryRequestOptions, null, this.maxBatchSize); return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, cosmosQueryRequestOptions) .map(FeedResponse::getResults) .flatMap(Flux::fromIterable) .onErrorResume(throwable -> { return Flux.empty(); }); } /** * Creates leases if they do not exist. 
This might happen on initial start or if some lease was unexpectedly lost. * <p> * Leases are created without the continuation token. It means partitions will be read according to * 'From Beginning' or 'From current time'. * Same applies also to split partitions. We do not search for parent lease and take continuation token since this * might end up of reprocessing all the events since the split. * * @param leaseTokens a hash set of all the lease tokens. * @return a deferred computation of this call. */ private Flux<Lease> createLeases(Set<String> leaseTokens) { Set<String> addedLeaseTokens = new HashSet<>(leaseTokens); return this.leaseContainer.getAllLeases() .map(lease -> { if (lease != null) { addedLeaseTokens.remove(lease.getLeaseToken()); } return lease; }) .thenMany(Flux.fromIterable(addedLeaseTokens) .flatMap( addedRangeId -> this.leaseManager.createLeaseIfNotExist(addedRangeId, null), this.degreeOfParallelism) .map( lease -> { return lease; }) ); } }
nit: Since you're passing in the name of the action, I think it might sound better to say something like: ```suggestion "Currently, the service can accept up to one %s. Duplicate actions not supported.", actionType))); ``` but feel free to leave as is
private void validateActionsNumber(Object[] actions, String actionType) { if (actions != null && actions.length > 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Currently, the service can accept up to one %s action per action type only", actionType))); } }
"Currently, the service can accept up to one %s action per action type only", actionType)));
private void validateActionsNumber(Object[] actions, String actionType) { if (actions != null && actions.length > 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Currently, the service can accept up to one %s. Multiple actions of the same type are not supported.", actionType))); } }
class TextAnalyticsActions { private final ClientLogger logger = new ClientLogger(TextAnalyticsActions.class); private String displayName; private Iterable<RecognizeEntitiesAction> recognizeEntitiesActions; private Iterable<RecognizeLinkedEntitiesAction> recognizeLinkedEntitiesActions; private Iterable<RecognizePiiEntitiesAction> recognizePiiEntitiesActions; private Iterable<ExtractKeyPhrasesAction> extractKeyPhrasesActions; private Iterable<AnalyzeSentimentAction> analyzeSentimentActions; /** * Get the custom name for the actions. * * @return the custom name for the actions. */ public String getDisplayName() { return displayName; } /** * Set the custom name for the actions. * * @param displayName the custom name for the actions. * * @return the {@link TextAnalyticsActions} object itself. */ public TextAnalyticsActions setDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the list of {@link RecognizeEntitiesAction} to be executed. * * @return the list of {@link RecognizeEntitiesAction} to be executed. */ public Iterable<RecognizeEntitiesAction> getRecognizeEntitiesActions() { return this.recognizeEntitiesActions; } /** * Set the list of {@link RecognizeEntitiesAction} to be executed. * * @param recognizeEntitiesActions the list of {@link RecognizeEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if duplicate {@link RecognizeEntitiesAction} actions are passed. */ public TextAnalyticsActions setRecognizeEntitiesActions(RecognizeEntitiesAction... recognizeEntitiesActions) { validateActionsNumber(recognizeEntitiesActions, RecognizeEntitiesAction.class.getName()); this.recognizeEntitiesActions = recognizeEntitiesActions == null ? null : Arrays.asList(recognizeEntitiesActions); return this; } /** * Get the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the list of {@link RecognizeLinkedEntitiesAction} to be executed. 
*/ public Iterable<RecognizeLinkedEntitiesAction> getRecognizeLinkedEntitiesActions() { return this.recognizeLinkedEntitiesActions; } /** * Set the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @param recognizeLinkedEntitiesActions the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if duplicate {@link RecognizeLinkedEntitiesAction} actions are passed. */ public TextAnalyticsActions setRecognizeLinkedEntitiesActions( RecognizeLinkedEntitiesAction... recognizeLinkedEntitiesActions) { validateActionsNumber(recognizeLinkedEntitiesActions, RecognizeLinkedEntitiesAction.class.getName()); this.recognizeLinkedEntitiesActions = recognizeLinkedEntitiesActions == null ? null : Arrays.asList(recognizeLinkedEntitiesActions); return this; } /** * Get the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the list of {@link RecognizePiiEntitiesAction} to be executed. */ public Iterable<RecognizePiiEntitiesAction> getRecognizePiiEntitiesActions() { return this.recognizePiiEntitiesActions; } /** * Set the list of {@link RecognizePiiEntitiesAction} to be executed. * * @param recognizePiiEntitiesActions the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if duplicate {@link RecognizePiiEntitiesAction} actions are passed. */ public TextAnalyticsActions setRecognizePiiEntitiesActions( RecognizePiiEntitiesAction... recognizePiiEntitiesActions) { validateActionsNumber(recognizePiiEntitiesActions, RecognizePiiEntitiesAction.class.getName()); this.recognizePiiEntitiesActions = recognizePiiEntitiesActions == null ? null : Arrays.asList(recognizePiiEntitiesActions); return this; } /** * Get the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the list of {@link ExtractKeyPhrasesAction} to be executed. 
*/ public Iterable<ExtractKeyPhrasesAction> getExtractKeyPhrasesActions() { return this.extractKeyPhrasesActions; } /** * Set the list of {@link ExtractKeyPhrasesAction} to be executed. * * @param extractKeyPhrasesActions the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if duplicate {@link ExtractKeyPhrasesAction} actions are passed. */ public TextAnalyticsActions setExtractKeyPhrasesActions(ExtractKeyPhrasesAction... extractKeyPhrasesActions) { validateActionsNumber(extractKeyPhrasesActions, ExtractKeyPhrasesAction.class.getName()); this.extractKeyPhrasesActions = extractKeyPhrasesActions == null ? null : Arrays.asList(extractKeyPhrasesActions); return this; } /** * Get the list of {@link AnalyzeSentimentAction} to be executed. * * @return the list of {@link AnalyzeSentimentAction} to be executed. */ public Iterable<AnalyzeSentimentAction> getAnalyzeSentimentActions() { return this.analyzeSentimentActions; } /** * Set the list of {@link AnalyzeSentimentAction} to be executed. * * @param analyzeSentimentActions the list of {@link AnalyzeSentimentAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if duplicate {@link AnalyzeSentimentAction} actions are passed. */ public TextAnalyticsActions setAnalyzeSentimentActions(AnalyzeSentimentAction... analyzeSentimentActions) { validateActionsNumber(analyzeSentimentActions, AnalyzeSentimentAction.class.getName()); this.analyzeSentimentActions = analyzeSentimentActions == null ? null : Arrays.asList(analyzeSentimentActions); return this; } }
class TextAnalyticsActions { private final ClientLogger logger = new ClientLogger(TextAnalyticsActions.class); private String displayName; private Iterable<RecognizeEntitiesAction> recognizeEntitiesActions; private Iterable<RecognizeLinkedEntitiesAction> recognizeLinkedEntitiesActions; private Iterable<RecognizePiiEntitiesAction> recognizePiiEntitiesActions; private Iterable<ExtractKeyPhrasesAction> extractKeyPhrasesActions; private Iterable<AnalyzeSentimentAction> analyzeSentimentActions; /** * Get the custom name for the actions. * * @return the custom name for the actions. */ public String getDisplayName() { return displayName; } /** * Set the custom name for the actions. * * @param displayName the custom name for the actions. * * @return the {@link TextAnalyticsActions} object itself. */ public TextAnalyticsActions setDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the list of {@link RecognizeEntitiesAction} to be executed. * * @return the list of {@link RecognizeEntitiesAction} to be executed. */ public Iterable<RecognizeEntitiesAction> getRecognizeEntitiesActions() { return this.recognizeEntitiesActions; } /** * Set the list of {@link RecognizeEntitiesAction} to be executed. * * @param recognizeEntitiesActions the list of {@link RecognizeEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeEntitiesActions(RecognizeEntitiesAction... recognizeEntitiesActions) { validateActionsNumber(recognizeEntitiesActions, RecognizeEntitiesAction.class.getName()); this.recognizeEntitiesActions = recognizeEntitiesActions == null ? null : Arrays.asList(recognizeEntitiesActions); return this; } /** * Get the list of {@link RecognizeLinkedEntitiesAction} to be executed. 
* * @return the list of {@link RecognizeLinkedEntitiesAction} to be executed. */ public Iterable<RecognizeLinkedEntitiesAction> getRecognizeLinkedEntitiesActions() { return this.recognizeLinkedEntitiesActions; } /** * Set the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @param recognizeLinkedEntitiesActions the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeLinkedEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeLinkedEntitiesActions( RecognizeLinkedEntitiesAction... recognizeLinkedEntitiesActions) { validateActionsNumber(recognizeLinkedEntitiesActions, RecognizeLinkedEntitiesAction.class.getName()); this.recognizeLinkedEntitiesActions = recognizeLinkedEntitiesActions == null ? null : Arrays.asList(recognizeLinkedEntitiesActions); return this; } /** * Get the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the list of {@link RecognizePiiEntitiesAction} to be executed. */ public Iterable<RecognizePiiEntitiesAction> getRecognizePiiEntitiesActions() { return this.recognizePiiEntitiesActions; } /** * Set the list of {@link RecognizePiiEntitiesAction} to be executed. * * @param recognizePiiEntitiesActions the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizePiiEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizePiiEntitiesActions( RecognizePiiEntitiesAction... recognizePiiEntitiesActions) { validateActionsNumber(recognizePiiEntitiesActions, RecognizePiiEntitiesAction.class.getName()); this.recognizePiiEntitiesActions = recognizePiiEntitiesActions == null ? 
null : Arrays.asList(recognizePiiEntitiesActions); return this; } /** * Get the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the list of {@link ExtractKeyPhrasesAction} to be executed. */ public Iterable<ExtractKeyPhrasesAction> getExtractKeyPhrasesActions() { return this.extractKeyPhrasesActions; } /** * Set the list of {@link ExtractKeyPhrasesAction} to be executed. * * @param extractKeyPhrasesActions the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link ExtractKeyPhrasesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setExtractKeyPhrasesActions(ExtractKeyPhrasesAction... extractKeyPhrasesActions) { validateActionsNumber(extractKeyPhrasesActions, ExtractKeyPhrasesAction.class.getName()); this.extractKeyPhrasesActions = extractKeyPhrasesActions == null ? null : Arrays.asList(extractKeyPhrasesActions); return this; } /** * Get the list of {@link AnalyzeSentimentAction} to be executed. * * @return the list of {@link AnalyzeSentimentAction} to be executed. */ public Iterable<AnalyzeSentimentAction> getAnalyzeSentimentActions() { return this.analyzeSentimentActions; } /** * Set the list of {@link AnalyzeSentimentAction} to be executed. * * @param analyzeSentimentActions the list of {@link AnalyzeSentimentAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link AnalyzeSentimentAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setAnalyzeSentimentActions(AnalyzeSentimentAction... analyzeSentimentActions) { validateActionsNumber(analyzeSentimentActions, AnalyzeSentimentAction.class.getName()); this.analyzeSentimentActions = analyzeSentimentActions == null ? 
null : Arrays.asList(analyzeSentimentActions); return this; } }
One last suggestion: ```suggestion "Currently, the service can accept up to one %s. Multiple actions of the same type are not supported.", actionType))); ```
private void validateActionsNumber(Object[] actions, String actionType) { if (actions != null && actions.length > 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Currently, the service can accept up to one %s. Duplicate actions not supported.", actionType))); } }
"Currently, the service can accept up to one %s. Duplicate actions not supported.", actionType)));
private void validateActionsNumber(Object[] actions, String actionType) { if (actions != null && actions.length > 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Currently, the service can accept up to one %s. Multiple actions of the same type are not supported.", actionType))); } }
class TextAnalyticsActions { private final ClientLogger logger = new ClientLogger(TextAnalyticsActions.class); private String displayName; private Iterable<RecognizeEntitiesAction> recognizeEntitiesActions; private Iterable<RecognizeLinkedEntitiesAction> recognizeLinkedEntitiesActions; private Iterable<RecognizePiiEntitiesAction> recognizePiiEntitiesActions; private Iterable<ExtractKeyPhrasesAction> extractKeyPhrasesActions; private Iterable<AnalyzeSentimentAction> analyzeSentimentActions; /** * Get the custom name for the actions. * * @return the custom name for the actions. */ public String getDisplayName() { return displayName; } /** * Set the custom name for the actions. * * @param displayName the custom name for the actions. * * @return the {@link TextAnalyticsActions} object itself. */ public TextAnalyticsActions setDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the list of {@link RecognizeEntitiesAction} to be executed. * * @return the list of {@link RecognizeEntitiesAction} to be executed. */ public Iterable<RecognizeEntitiesAction> getRecognizeEntitiesActions() { return this.recognizeEntitiesActions; } /** * Set the list of {@link RecognizeEntitiesAction} to be executed. * * @param recognizeEntitiesActions the list of {@link RecognizeEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeEntitiesActions(RecognizeEntitiesAction... recognizeEntitiesActions) { validateActionsNumber(recognizeEntitiesActions, RecognizeEntitiesAction.class.getName()); this.recognizeEntitiesActions = recognizeEntitiesActions == null ? null : Arrays.asList(recognizeEntitiesActions); return this; } /** * Get the list of {@link RecognizeLinkedEntitiesAction} to be executed. 
* * @return the list of {@link RecognizeLinkedEntitiesAction} to be executed. */ public Iterable<RecognizeLinkedEntitiesAction> getRecognizeLinkedEntitiesActions() { return this.recognizeLinkedEntitiesActions; } /** * Set the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @param recognizeLinkedEntitiesActions the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeLinkedEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeLinkedEntitiesActions( RecognizeLinkedEntitiesAction... recognizeLinkedEntitiesActions) { validateActionsNumber(recognizeLinkedEntitiesActions, RecognizeLinkedEntitiesAction.class.getName()); this.recognizeLinkedEntitiesActions = recognizeLinkedEntitiesActions == null ? null : Arrays.asList(recognizeLinkedEntitiesActions); return this; } /** * Get the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the list of {@link RecognizePiiEntitiesAction} to be executed. */ public Iterable<RecognizePiiEntitiesAction> getRecognizePiiEntitiesActions() { return this.recognizePiiEntitiesActions; } /** * Set the list of {@link RecognizePiiEntitiesAction} to be executed. * * @param recognizePiiEntitiesActions the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizePiiEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizePiiEntitiesActions( RecognizePiiEntitiesAction... recognizePiiEntitiesActions) { validateActionsNumber(recognizePiiEntitiesActions, RecognizePiiEntitiesAction.class.getName()); this.recognizePiiEntitiesActions = recognizePiiEntitiesActions == null ? 
null : Arrays.asList(recognizePiiEntitiesActions); return this; } /** * Get the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the list of {@link ExtractKeyPhrasesAction} to be executed. */ public Iterable<ExtractKeyPhrasesAction> getExtractKeyPhrasesActions() { return this.extractKeyPhrasesActions; } /** * Set the list of {@link ExtractKeyPhrasesAction} to be executed. * * @param extractKeyPhrasesActions the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link ExtractKeyPhrasesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setExtractKeyPhrasesActions(ExtractKeyPhrasesAction... extractKeyPhrasesActions) { validateActionsNumber(extractKeyPhrasesActions, ExtractKeyPhrasesAction.class.getName()); this.extractKeyPhrasesActions = extractKeyPhrasesActions == null ? null : Arrays.asList(extractKeyPhrasesActions); return this; } /** * Get the list of {@link AnalyzeSentimentAction} to be executed. * * @return the list of {@link AnalyzeSentimentAction} to be executed. */ public Iterable<AnalyzeSentimentAction> getAnalyzeSentimentActions() { return this.analyzeSentimentActions; } /** * Set the list of {@link AnalyzeSentimentAction} to be executed. * * @param analyzeSentimentActions the list of {@link AnalyzeSentimentAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link AnalyzeSentimentAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setAnalyzeSentimentActions(AnalyzeSentimentAction... analyzeSentimentActions) { validateActionsNumber(analyzeSentimentActions, AnalyzeSentimentAction.class.getName()); this.analyzeSentimentActions = analyzeSentimentActions == null ? 
null : Arrays.asList(analyzeSentimentActions); return this; } }
class TextAnalyticsActions { private final ClientLogger logger = new ClientLogger(TextAnalyticsActions.class); private String displayName; private Iterable<RecognizeEntitiesAction> recognizeEntitiesActions; private Iterable<RecognizeLinkedEntitiesAction> recognizeLinkedEntitiesActions; private Iterable<RecognizePiiEntitiesAction> recognizePiiEntitiesActions; private Iterable<ExtractKeyPhrasesAction> extractKeyPhrasesActions; private Iterable<AnalyzeSentimentAction> analyzeSentimentActions; /** * Get the custom name for the actions. * * @return the custom name for the actions. */ public String getDisplayName() { return displayName; } /** * Set the custom name for the actions. * * @param displayName the custom name for the actions. * * @return the {@link TextAnalyticsActions} object itself. */ public TextAnalyticsActions setDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the list of {@link RecognizeEntitiesAction} to be executed. * * @return the list of {@link RecognizeEntitiesAction} to be executed. */ public Iterable<RecognizeEntitiesAction> getRecognizeEntitiesActions() { return this.recognizeEntitiesActions; } /** * Set the list of {@link RecognizeEntitiesAction} to be executed. * * @param recognizeEntitiesActions the list of {@link RecognizeEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeEntitiesActions(RecognizeEntitiesAction... recognizeEntitiesActions) { validateActionsNumber(recognizeEntitiesActions, RecognizeEntitiesAction.class.getName()); this.recognizeEntitiesActions = recognizeEntitiesActions == null ? null : Arrays.asList(recognizeEntitiesActions); return this; } /** * Get the list of {@link RecognizeLinkedEntitiesAction} to be executed. 
* * @return the list of {@link RecognizeLinkedEntitiesAction} to be executed. */ public Iterable<RecognizeLinkedEntitiesAction> getRecognizeLinkedEntitiesActions() { return this.recognizeLinkedEntitiesActions; } /** * Set the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @param recognizeLinkedEntitiesActions the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeLinkedEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeLinkedEntitiesActions( RecognizeLinkedEntitiesAction... recognizeLinkedEntitiesActions) { validateActionsNumber(recognizeLinkedEntitiesActions, RecognizeLinkedEntitiesAction.class.getName()); this.recognizeLinkedEntitiesActions = recognizeLinkedEntitiesActions == null ? null : Arrays.asList(recognizeLinkedEntitiesActions); return this; } /** * Get the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the list of {@link RecognizePiiEntitiesAction} to be executed. */ public Iterable<RecognizePiiEntitiesAction> getRecognizePiiEntitiesActions() { return this.recognizePiiEntitiesActions; } /** * Set the list of {@link RecognizePiiEntitiesAction} to be executed. * * @param recognizePiiEntitiesActions the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizePiiEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizePiiEntitiesActions( RecognizePiiEntitiesAction... recognizePiiEntitiesActions) { validateActionsNumber(recognizePiiEntitiesActions, RecognizePiiEntitiesAction.class.getName()); this.recognizePiiEntitiesActions = recognizePiiEntitiesActions == null ? 
null : Arrays.asList(recognizePiiEntitiesActions); return this; } /** * Get the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the list of {@link ExtractKeyPhrasesAction} to be executed. */ public Iterable<ExtractKeyPhrasesAction> getExtractKeyPhrasesActions() { return this.extractKeyPhrasesActions; } /** * Set the list of {@link ExtractKeyPhrasesAction} to be executed. * * @param extractKeyPhrasesActions the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link ExtractKeyPhrasesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setExtractKeyPhrasesActions(ExtractKeyPhrasesAction... extractKeyPhrasesActions) { validateActionsNumber(extractKeyPhrasesActions, ExtractKeyPhrasesAction.class.getName()); this.extractKeyPhrasesActions = extractKeyPhrasesActions == null ? null : Arrays.asList(extractKeyPhrasesActions); return this; } /** * Get the list of {@link AnalyzeSentimentAction} to be executed. * * @return the list of {@link AnalyzeSentimentAction} to be executed. */ public Iterable<AnalyzeSentimentAction> getAnalyzeSentimentActions() { return this.analyzeSentimentActions; } /** * Set the list of {@link AnalyzeSentimentAction} to be executed. * * @param analyzeSentimentActions the list of {@link AnalyzeSentimentAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link AnalyzeSentimentAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setAnalyzeSentimentActions(AnalyzeSentimentAction... analyzeSentimentActions) { validateActionsNumber(analyzeSentimentActions, AnalyzeSentimentAction.class.getName()); this.analyzeSentimentActions = analyzeSentimentActions == null ? 
null : Arrays.asList(analyzeSentimentActions); return this; } }
+1 to @deyaaeldeen 's error message.
private void validateActionsNumber(Object[] actions, String actionType) { if (actions != null && actions.length > 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Currently, the service can accept up to one %s. Duplicate actions not supported.", actionType))); } }
"Currently, the service can accept up to one %s. Duplicate actions not supported.", actionType)));
private void validateActionsNumber(Object[] actions, String actionType) { if (actions != null && actions.length > 1) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Currently, the service can accept up to one %s. Multiple actions of the same type are not supported.", actionType))); } }
class TextAnalyticsActions { private final ClientLogger logger = new ClientLogger(TextAnalyticsActions.class); private String displayName; private Iterable<RecognizeEntitiesAction> recognizeEntitiesActions; private Iterable<RecognizeLinkedEntitiesAction> recognizeLinkedEntitiesActions; private Iterable<RecognizePiiEntitiesAction> recognizePiiEntitiesActions; private Iterable<ExtractKeyPhrasesAction> extractKeyPhrasesActions; private Iterable<AnalyzeSentimentAction> analyzeSentimentActions; /** * Get the custom name for the actions. * * @return the custom name for the actions. */ public String getDisplayName() { return displayName; } /** * Set the custom name for the actions. * * @param displayName the custom name for the actions. * * @return the {@link TextAnalyticsActions} object itself. */ public TextAnalyticsActions setDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the list of {@link RecognizeEntitiesAction} to be executed. * * @return the list of {@link RecognizeEntitiesAction} to be executed. */ public Iterable<RecognizeEntitiesAction> getRecognizeEntitiesActions() { return this.recognizeEntitiesActions; } /** * Set the list of {@link RecognizeEntitiesAction} to be executed. * * @param recognizeEntitiesActions the list of {@link RecognizeEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeEntitiesActions(RecognizeEntitiesAction... recognizeEntitiesActions) { validateActionsNumber(recognizeEntitiesActions, RecognizeEntitiesAction.class.getName()); this.recognizeEntitiesActions = recognizeEntitiesActions == null ? null : Arrays.asList(recognizeEntitiesActions); return this; } /** * Get the list of {@link RecognizeLinkedEntitiesAction} to be executed. 
* * @return the list of {@link RecognizeLinkedEntitiesAction} to be executed. */ public Iterable<RecognizeLinkedEntitiesAction> getRecognizeLinkedEntitiesActions() { return this.recognizeLinkedEntitiesActions; } /** * Set the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @param recognizeLinkedEntitiesActions the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeLinkedEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeLinkedEntitiesActions( RecognizeLinkedEntitiesAction... recognizeLinkedEntitiesActions) { validateActionsNumber(recognizeLinkedEntitiesActions, RecognizeLinkedEntitiesAction.class.getName()); this.recognizeLinkedEntitiesActions = recognizeLinkedEntitiesActions == null ? null : Arrays.asList(recognizeLinkedEntitiesActions); return this; } /** * Get the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the list of {@link RecognizePiiEntitiesAction} to be executed. */ public Iterable<RecognizePiiEntitiesAction> getRecognizePiiEntitiesActions() { return this.recognizePiiEntitiesActions; } /** * Set the list of {@link RecognizePiiEntitiesAction} to be executed. * * @param recognizePiiEntitiesActions the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizePiiEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizePiiEntitiesActions( RecognizePiiEntitiesAction... recognizePiiEntitiesActions) { validateActionsNumber(recognizePiiEntitiesActions, RecognizePiiEntitiesAction.class.getName()); this.recognizePiiEntitiesActions = recognizePiiEntitiesActions == null ? 
null : Arrays.asList(recognizePiiEntitiesActions); return this; } /** * Get the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the list of {@link ExtractKeyPhrasesAction} to be executed. */ public Iterable<ExtractKeyPhrasesAction> getExtractKeyPhrasesActions() { return this.extractKeyPhrasesActions; } /** * Set the list of {@link ExtractKeyPhrasesAction} to be executed. * * @param extractKeyPhrasesActions the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link ExtractKeyPhrasesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setExtractKeyPhrasesActions(ExtractKeyPhrasesAction... extractKeyPhrasesActions) { validateActionsNumber(extractKeyPhrasesActions, ExtractKeyPhrasesAction.class.getName()); this.extractKeyPhrasesActions = extractKeyPhrasesActions == null ? null : Arrays.asList(extractKeyPhrasesActions); return this; } /** * Get the list of {@link AnalyzeSentimentAction} to be executed. * * @return the list of {@link AnalyzeSentimentAction} to be executed. */ public Iterable<AnalyzeSentimentAction> getAnalyzeSentimentActions() { return this.analyzeSentimentActions; } /** * Set the list of {@link AnalyzeSentimentAction} to be executed. * * @param analyzeSentimentActions the list of {@link AnalyzeSentimentAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link AnalyzeSentimentAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setAnalyzeSentimentActions(AnalyzeSentimentAction... analyzeSentimentActions) { validateActionsNumber(analyzeSentimentActions, AnalyzeSentimentAction.class.getName()); this.analyzeSentimentActions = analyzeSentimentActions == null ? 
null : Arrays.asList(analyzeSentimentActions); return this; } }
class TextAnalyticsActions { private final ClientLogger logger = new ClientLogger(TextAnalyticsActions.class); private String displayName; private Iterable<RecognizeEntitiesAction> recognizeEntitiesActions; private Iterable<RecognizeLinkedEntitiesAction> recognizeLinkedEntitiesActions; private Iterable<RecognizePiiEntitiesAction> recognizePiiEntitiesActions; private Iterable<ExtractKeyPhrasesAction> extractKeyPhrasesActions; private Iterable<AnalyzeSentimentAction> analyzeSentimentActions; /** * Get the custom name for the actions. * * @return the custom name for the actions. */ public String getDisplayName() { return displayName; } /** * Set the custom name for the actions. * * @param displayName the custom name for the actions. * * @return the {@link TextAnalyticsActions} object itself. */ public TextAnalyticsActions setDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the list of {@link RecognizeEntitiesAction} to be executed. * * @return the list of {@link RecognizeEntitiesAction} to be executed. */ public Iterable<RecognizeEntitiesAction> getRecognizeEntitiesActions() { return this.recognizeEntitiesActions; } /** * Set the list of {@link RecognizeEntitiesAction} to be executed. * * @param recognizeEntitiesActions the list of {@link RecognizeEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeEntitiesActions(RecognizeEntitiesAction... recognizeEntitiesActions) { validateActionsNumber(recognizeEntitiesActions, RecognizeEntitiesAction.class.getName()); this.recognizeEntitiesActions = recognizeEntitiesActions == null ? null : Arrays.asList(recognizeEntitiesActions); return this; } /** * Get the list of {@link RecognizeLinkedEntitiesAction} to be executed. 
* * @return the list of {@link RecognizeLinkedEntitiesAction} to be executed. */ public Iterable<RecognizeLinkedEntitiesAction> getRecognizeLinkedEntitiesActions() { return this.recognizeLinkedEntitiesActions; } /** * Set the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @param recognizeLinkedEntitiesActions the list of {@link RecognizeLinkedEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizeLinkedEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizeLinkedEntitiesActions( RecognizeLinkedEntitiesAction... recognizeLinkedEntitiesActions) { validateActionsNumber(recognizeLinkedEntitiesActions, RecognizeLinkedEntitiesAction.class.getName()); this.recognizeLinkedEntitiesActions = recognizeLinkedEntitiesActions == null ? null : Arrays.asList(recognizeLinkedEntitiesActions); return this; } /** * Get the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the list of {@link RecognizePiiEntitiesAction} to be executed. */ public Iterable<RecognizePiiEntitiesAction> getRecognizePiiEntitiesActions() { return this.recognizePiiEntitiesActions; } /** * Set the list of {@link RecognizePiiEntitiesAction} to be executed. * * @param recognizePiiEntitiesActions the list of {@link RecognizePiiEntitiesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link RecognizePiiEntitiesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setRecognizePiiEntitiesActions( RecognizePiiEntitiesAction... recognizePiiEntitiesActions) { validateActionsNumber(recognizePiiEntitiesActions, RecognizePiiEntitiesAction.class.getName()); this.recognizePiiEntitiesActions = recognizePiiEntitiesActions == null ? 
null : Arrays.asList(recognizePiiEntitiesActions); return this; } /** * Get the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the list of {@link ExtractKeyPhrasesAction} to be executed. */ public Iterable<ExtractKeyPhrasesAction> getExtractKeyPhrasesActions() { return this.extractKeyPhrasesActions; } /** * Set the list of {@link ExtractKeyPhrasesAction} to be executed. * * @param extractKeyPhrasesActions the list of {@link ExtractKeyPhrasesAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link ExtractKeyPhrasesAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setExtractKeyPhrasesActions(ExtractKeyPhrasesAction... extractKeyPhrasesActions) { validateActionsNumber(extractKeyPhrasesActions, ExtractKeyPhrasesAction.class.getName()); this.extractKeyPhrasesActions = extractKeyPhrasesActions == null ? null : Arrays.asList(extractKeyPhrasesActions); return this; } /** * Get the list of {@link AnalyzeSentimentAction} to be executed. * * @return the list of {@link AnalyzeSentimentAction} to be executed. */ public Iterable<AnalyzeSentimentAction> getAnalyzeSentimentActions() { return this.analyzeSentimentActions; } /** * Set the list of {@link AnalyzeSentimentAction} to be executed. * * @param analyzeSentimentActions the list of {@link AnalyzeSentimentAction} to be executed. * * @return the {@link TextAnalyticsActions} object itself. * * @throws IllegalArgumentException if more than one {@link AnalyzeSentimentAction} action are passed in. * Currently service v3.1 only accepts up to one action per type. */ public TextAnalyticsActions setAnalyzeSentimentActions(AnalyzeSentimentAction... analyzeSentimentActions) { validateActionsNumber(analyzeSentimentActions, AnalyzeSentimentAction.class.getName()); this.analyzeSentimentActions = analyzeSentimentActions == null ? 
null : Arrays.asList(analyzeSentimentActions); return this; } }
Same here - use `IllegalArgumentException`.
private void updateSettingValue() { try { super.setValue(writeSecretReferenceConfigurationSetting(this)); } catch (IOException exception) { LOGGER.logExceptionAsError(new RuntimeException( "Can't parse Secret Reference configuration setting value. Exception:" + exception)); } }
LOGGER.logExceptionAsError(new RuntimeException(
private void updateSettingValue() { try { super.setValue(writeSecretReferenceConfigurationSetting(this)); } catch (IOException exception) { LOGGER.logExceptionAsError(new IllegalArgumentException( "Can't parse Secret Reference configuration setting value.", exception)); } }
class SecretReferenceConfigurationSetting extends ConfigurationSetting { private static final ClientLogger LOGGER = new ClientLogger(SecretReferenceConfigurationSetting.class); private String secretId; private static final String SECRET_REFERENCE_CONTENT_TYPE = "application/vnd.microsoft.appconfig.keyvaultref+json;charset=utf-8"; /** * The constructor for a secret reference configuration setting. * * @param key A key name for this configuration setting. * @param secretId A uri value that used to in the JSON value of setting. e.x., {"uri":"{secretId}"}. */ public SecretReferenceConfigurationSetting(String key, String secretId) { this.secretId = secretId; super.setKey(key); super.setValue("{\"uri\":\"" + secretId + "\"}"); super.setContentType(SECRET_REFERENCE_CONTENT_TYPE); } /** * Get the secret ID value of this configuration setting. * * @return the secret ID value of this configuration setting. */ public String getSecretId() { return secretId; } /** * Set the secret ID value of this configuration setting. * * @param secretId the secret ID value of this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. */ public SecretReferenceConfigurationSetting setSecretId(String secretId) { this.secretId = secretId; updateSettingValue(); return this; } /** * Sets the key of this setting. * * @param key The key to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public SecretReferenceConfigurationSetting setKey(String key) { super.setKey(key); return this; } /** * Sets the value of this setting. * * @param value The value to associate with this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. 
*/ @Override public SecretReferenceConfigurationSetting setValue(String value) { super.setValue(value); final SecretReferenceConfigurationSetting updatedSetting = readSecretReferenceConfigurationSettingValue( super.getKey(), value); this.secretId = updatedSetting.getSecretId(); return this; } /** * Sets the label of this configuration setting. {@link * set. * * @param label The label of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setLabel(String label) { super.setLabel(label); return this; } /** * Sets the content type. By default, the content type is null. * * @param contentType The content type of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setContentType(String contentType) { super.setContentType(contentType); return this; } /** * Sets the ETag for this configuration setting. * * @param etag The ETag for the configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setETag(String etag) { super.setETag(etag); return this; } /** * Sets the tags for this configuration setting. * * @param tags The tags to add to this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setTags(Map<String, String> tags) { super.setTags(tags); return this; } }
class SecretReferenceConfigurationSetting extends ConfigurationSetting { private static final ClientLogger LOGGER = new ClientLogger(SecretReferenceConfigurationSetting.class); private String secretId; private static final String SECRET_REFERENCE_CONTENT_TYPE = "application/vnd.microsoft.appconfig.keyvaultref+json;charset=utf-8"; /** * The constructor for a secret reference configuration setting. * * @param key A key name for this configuration setting. * @param secretId A uri value that used to in the JSON value of setting. e.x., {"uri":"{secretId}"}. */ public SecretReferenceConfigurationSetting(String key, String secretId) { this.secretId = secretId; super.setKey(key); super.setValue("{\"uri\":\"" + secretId + "\"}"); super.setContentType(SECRET_REFERENCE_CONTENT_TYPE); } /** * Get the secret ID value of this configuration setting. * * @return the secret ID value of this configuration setting. */ public String getSecretId() { return secretId; } /** * Set the secret ID value of this configuration setting. * * @param secretId the secret ID value of this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public SecretReferenceConfigurationSetting setSecretId(String secretId) { this.secretId = secretId; updateSettingValue(); return this; } /** * Sets the key of this setting. * * @param key The key to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public SecretReferenceConfigurationSetting setKey(String key) { super.setKey(key); return this; } /** * Sets the value of this setting. * * @param value The value to associate with this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. 
*/ @Override public SecretReferenceConfigurationSetting setValue(String value) { super.setValue(value); final SecretReferenceConfigurationSetting updatedSetting = readSecretReferenceConfigurationSettingValue( super.getKey(), value); this.secretId = updatedSetting.getSecretId(); return this; } /** * Sets the label of this configuration setting. {@link * set. * * @param label The label of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setLabel(String label) { super.setLabel(label); return this; } /** * Sets the content type. By default, the content type is null. * * @param contentType The content type of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setContentType(String contentType) { super.setContentType(contentType); return this; } /** * Sets the ETag for this configuration setting. * * @param etag The ETag for the configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setETag(String etag) { super.setETag(etag); return this; } /** * Sets the tags for this configuration setting. * * @param tags The tags to add to this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setTags(Map<String, String> tags) { super.setTags(tags); return this; } }
Don't use `+ exception`. Instead pass it as param `LOGGER.logExceptionAsError(new IllegalArgumentException( "Can't parse Feature Flag configuration setting value.", exception))`. Or use `exception.getMessage()`.
private void updateSettingValue() { try { super.setValue(writeFeatureFlagConfigurationSetting(this)); } catch (IOException exception) { LOGGER.logExceptionAsError(new IllegalArgumentException( "Can't parse Feature Flag configuration setting value. Exception:" + exception)); } }
"Can't parse Feature Flag configuration setting value. Exception:" + exception));
private void updateSettingValue() { try { super.setValue(writeFeatureFlagConfigurationSetting(this)); } catch (IOException exception) { LOGGER.logExceptionAsError(new IllegalArgumentException( "Can't parse Feature Flag configuration setting value.", exception)); } }
class FeatureFlagConfigurationSetting extends ConfigurationSetting { private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class); private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8"; private String featureId; private boolean isEnabled; private String description; private String displayName; private List<FeatureFlagFilter> clientFilters; /** * A prefix is used to construct a feature flag configuration setting's key. */ public static final String KEY_PREFIX = ".appconfig.featureflag/"; /** * The constructor for a feature flag configuration setting. * * @param featureId A feature flag identification value that used to construct in setting's key. The key of setting * is {@code KEY_PREFIX} concatenate {@code featureId}. * @param isEnabled A boolean value to turn on/off the feature flag setting. */ public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) { this.featureId = featureId; this.isEnabled = isEnabled; super.setKey(KEY_PREFIX + featureId); super.setContentType(FEATURE_FLAG_CONTENT_TYPE); } /** * Sets the key of this setting. * * @param key The key to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setKey(String key) { super.setKey(key); return this; } /** * Sets the value of this setting. * * @param value The value to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. 
*/ @Override public FeatureFlagConfigurationSetting setValue(String value) { super.setValue(value); final FeatureFlagConfigurationSetting updatedSetting = readFeatureFlagConfigurationSettingValue(value); this.featureId = updatedSetting.getFeatureId(); this.description = updatedSetting.getDescription(); this.isEnabled = updatedSetting.isEnabled(); this.displayName = updatedSetting.getDisplayName(); if (updatedSetting.getClientFilters() != null) { this.clientFilters = StreamSupport.stream(updatedSetting.getClientFilters().spliterator(), false) .collect(Collectors.toList()); } else { this.clientFilters = null; } return this; } /** * Sets the label of this configuration setting. {@link * set. * * @param label The label of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setLabel(String label) { super.setLabel(label); return this; } /** * Sets the content type. By default, the content type is null. * * @param contentType The content type of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setContentType(String contentType) { super.setContentType(contentType); return this; } /** * Sets the ETag for this configuration setting. * * @param etag The ETag for the configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setETag(String etag) { super.setETag(etag); return this; } /** * Sets the tags for this configuration setting. * * @param tags The tags to add to this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) { super.setTags(tags); return this; } /** * Get the feature ID of this configuration setting. 
* * @return the feature ID of this configuration setting. */ public String getFeatureId() { return featureId; } /** * Set the feature ID of this configuration setting. * * @param featureId the feature ID of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setFeatureId(String featureId) { this.featureId = featureId; super.setKey(KEY_PREFIX + featureId); updateSettingValue(); return this; } /** * Get the boolean indicator to show if the setting is turn on or off. * * @return the boolean indicator to show if the setting is turn on or off. */ public boolean isEnabled() { return this.isEnabled; } /** * Set the boolean indicator to show if the setting is turn on or off. * * @param isEnabled the boolean indicator to show if the setting is turn on or off. * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) { this.isEnabled = isEnabled; updateSettingValue(); return this; } /** * Get the description of this configuration setting. * * @return the description of this configuration setting. */ public String getDescription() { return description; } /** * Set the description of this configuration setting. * * @param description the description of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setDescription(String description) { this.description = description; updateSettingValue(); return this; } /** * Get the display name of this configuration setting. * * @return the display name of this configuration setting. 
*/ public String getDisplayName() { return displayName; } /** * Set the display name of this configuration setting. * * @param displayName the display name of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setDisplayName(String displayName) { this.displayName = displayName; updateSettingValue(); return this; } /** * Gets the feature flag filters of this configuration setting. * * @return the feature flag filters of this configuration setting. */ public List<FeatureFlagFilter> getClientFilters() { return clientFilters; } /** * Sets the feature flag filters of this configuration setting. * * @param clientFilters the feature flag filters of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) { this.clientFilters = clientFilters; updateSettingValue(); return this; } /** * Add a feature flag filter to this configuration setting. * * @param clientFilter a feature flag filter to add to this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) { clientFilters.add(clientFilter); updateSettingValue(); return this; } }
class FeatureFlagConfigurationSetting extends ConfigurationSetting { private static final ClientLogger LOGGER = new ClientLogger(FeatureFlagConfigurationSetting.class); private static final String FEATURE_FLAG_CONTENT_TYPE = "application/vnd.microsoft.appconfig.ff+json;charset=utf-8"; private String featureId; private boolean isEnabled; private String description; private String displayName; private List<FeatureFlagFilter> clientFilters; /** * A prefix is used to construct a feature flag configuration setting's key. */ public static final String KEY_PREFIX = ".appconfig.featureflag/"; /** * The constructor for a feature flag configuration setting. * * @param featureId A feature flag identification value that used to construct in setting's key. The key of setting * is {@code KEY_PREFIX} concatenate {@code featureId}. * @param isEnabled A boolean value to turn on/off the feature flag setting. */ public FeatureFlagConfigurationSetting(String featureId, boolean isEnabled) { this.featureId = featureId; this.isEnabled = isEnabled; super.setKey(KEY_PREFIX + featureId); super.setContentType(FEATURE_FLAG_CONTENT_TYPE); } /** * Sets the key of this setting. * * @param key The key to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setKey(String key) { super.setKey(key); return this; } /** * Sets the value of this setting. * * @param value The value to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. 
*/ @Override public FeatureFlagConfigurationSetting setValue(String value) { super.setValue(value); final FeatureFlagConfigurationSetting updatedSetting = readFeatureFlagConfigurationSettingValue(value); this.featureId = updatedSetting.getFeatureId(); this.description = updatedSetting.getDescription(); this.isEnabled = updatedSetting.isEnabled(); this.displayName = updatedSetting.getDisplayName(); if (updatedSetting.getClientFilters() != null) { this.clientFilters = StreamSupport.stream(updatedSetting.getClientFilters().spliterator(), false) .collect(Collectors.toList()); } else { this.clientFilters = null; } return this; } /** * Sets the label of this configuration setting. {@link * set. * * @param label The label of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setLabel(String label) { super.setLabel(label); return this; } /** * Sets the content type. By default, the content type is null. * * @param contentType The content type of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setContentType(String contentType) { super.setContentType(contentType); return this; } /** * Sets the ETag for this configuration setting. * * @param etag The ETag for the configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setETag(String etag) { super.setETag(etag); return this; } /** * Sets the tags for this configuration setting. * * @param tags The tags to add to this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public FeatureFlagConfigurationSetting setTags(Map<String, String> tags) { super.setTags(tags); return this; } /** * Get the feature ID of this configuration setting. 
* * @return the feature ID of this configuration setting. */ public String getFeatureId() { return featureId; } /** * Set the feature ID of this configuration setting. * * @param featureId the feature ID of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setFeatureId(String featureId) { this.featureId = featureId; super.setKey(KEY_PREFIX + featureId); updateSettingValue(); return this; } /** * Get the boolean indicator to show if the setting is turn on or off. * * @return the boolean indicator to show if the setting is turn on or off. */ public boolean isEnabled() { return this.isEnabled; } /** * Set the boolean indicator to show if the setting is turn on or off. * * @param isEnabled the boolean indicator to show if the setting is turn on or off. * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setEnabled(boolean isEnabled) { this.isEnabled = isEnabled; updateSettingValue(); return this; } /** * Get the description of this configuration setting. * * @return the description of this configuration setting. */ public String getDescription() { return description; } /** * Set the description of this configuration setting. * * @param description the description of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setDescription(String description) { this.description = description; updateSettingValue(); return this; } /** * Get the display name of this configuration setting. * * @return the display name of this configuration setting. 
*/ public String getDisplayName() { return displayName; } /** * Set the display name of this configuration setting. * * @param displayName the display name of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setDisplayName(String displayName) { this.displayName = displayName; updateSettingValue(); return this; } /** * Gets the feature flag filters of this configuration setting. * * @return the feature flag filters of this configuration setting. */ public List<FeatureFlagFilter> getClientFilters() { return clientFilters; } /** * Sets the feature flag filters of this configuration setting. * * @param clientFilters the feature flag filters of this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public FeatureFlagConfigurationSetting setClientFilters(List<FeatureFlagFilter> clientFilters) { this.clientFilters = clientFilters; updateSettingValue(); return this; } /** * Add a feature flag filter to this configuration setting. * * @param clientFilter a feature flag filter to add to this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ public FeatureFlagConfigurationSetting addClientFilter(FeatureFlagFilter clientFilter) { clientFilters.add(clientFilter); updateSettingValue(); return this; } }
Same here
private void updateSettingValue() { try { super.setValue(writeSecretReferenceConfigurationSetting(this)); } catch (IOException exception) { LOGGER.logExceptionAsError(new IllegalArgumentException( "Can't parse Secret Reference configuration setting value. Exception:" + exception)); } }
"Can't parse Secret Reference configuration setting value. Exception:" + exception));
private void updateSettingValue() { try { super.setValue(writeSecretReferenceConfigurationSetting(this)); } catch (IOException exception) { LOGGER.logExceptionAsError(new IllegalArgumentException( "Can't parse Secret Reference configuration setting value.", exception)); } }
class SecretReferenceConfigurationSetting extends ConfigurationSetting { private static final ClientLogger LOGGER = new ClientLogger(SecretReferenceConfigurationSetting.class); private String secretId; private static final String SECRET_REFERENCE_CONTENT_TYPE = "application/vnd.microsoft.appconfig.keyvaultref+json;charset=utf-8"; /** * The constructor for a secret reference configuration setting. * * @param key A key name for this configuration setting. * @param secretId A uri value that used to in the JSON value of setting. e.x., {"uri":"{secretId}"}. */ public SecretReferenceConfigurationSetting(String key, String secretId) { this.secretId = secretId; super.setKey(key); super.setValue("{\"uri\":\"" + secretId + "\"}"); super.setContentType(SECRET_REFERENCE_CONTENT_TYPE); } /** * Get the secret ID value of this configuration setting. * * @return the secret ID value of this configuration setting. */ public String getSecretId() { return secretId; } /** * Set the secret ID value of this configuration setting. * * @param secretId the secret ID value of this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public SecretReferenceConfigurationSetting setSecretId(String secretId) { this.secretId = secretId; updateSettingValue(); return this; } /** * Sets the key of this setting. * * @param key The key to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public SecretReferenceConfigurationSetting setKey(String key) { super.setKey(key); return this; } /** * Sets the value of this setting. * * @param value The value to associate with this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. 
*/ @Override public SecretReferenceConfigurationSetting setValue(String value) { super.setValue(value); final SecretReferenceConfigurationSetting updatedSetting = readSecretReferenceConfigurationSettingValue( super.getKey(), value); this.secretId = updatedSetting.getSecretId(); return this; } /** * Sets the label of this configuration setting. {@link * set. * * @param label The label of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setLabel(String label) { super.setLabel(label); return this; } /** * Sets the content type. By default, the content type is null. * * @param contentType The content type of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setContentType(String contentType) { super.setContentType(contentType); return this; } /** * Sets the ETag for this configuration setting. * * @param etag The ETag for the configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setETag(String etag) { super.setETag(etag); return this; } /** * Sets the tags for this configuration setting. * * @param tags The tags to add to this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setTags(Map<String, String> tags) { super.setTags(tags); return this; } }
class SecretReferenceConfigurationSetting extends ConfigurationSetting { private static final ClientLogger LOGGER = new ClientLogger(SecretReferenceConfigurationSetting.class); private String secretId; private static final String SECRET_REFERENCE_CONTENT_TYPE = "application/vnd.microsoft.appconfig.keyvaultref+json;charset=utf-8"; /** * The constructor for a secret reference configuration setting. * * @param key A key name for this configuration setting. * @param secretId A uri value that used to in the JSON value of setting. e.x., {"uri":"{secretId}"}. */ public SecretReferenceConfigurationSetting(String key, String secretId) { this.secretId = secretId; super.setKey(key); super.setValue("{\"uri\":\"" + secretId + "\"}"); super.setContentType(SECRET_REFERENCE_CONTENT_TYPE); } /** * Get the secret ID value of this configuration setting. * * @return the secret ID value of this configuration setting. */ public String getSecretId() { return secretId; } /** * Set the secret ID value of this configuration setting. * * @param secretId the secret ID value of this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. */ public SecretReferenceConfigurationSetting setSecretId(String secretId) { this.secretId = secretId; updateSettingValue(); return this; } /** * Sets the key of this setting. * * @param key The key to associate with this configuration setting. * * @return The updated {@link FeatureFlagConfigurationSetting} object. */ @Override public SecretReferenceConfigurationSetting setKey(String key) { super.setKey(key); return this; } /** * Sets the value of this setting. * * @param value The value to associate with this configuration setting. * * @return The updated {@link SecretReferenceConfigurationSetting} object. * @throws IllegalArgumentException if the setting's {@code value} is an invalid JSON format. 
*/ @Override public SecretReferenceConfigurationSetting setValue(String value) { super.setValue(value); final SecretReferenceConfigurationSetting updatedSetting = readSecretReferenceConfigurationSettingValue( super.getKey(), value); this.secretId = updatedSetting.getSecretId(); return this; } /** * Sets the label of this configuration setting. {@link * set. * * @param label The label of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setLabel(String label) { super.setLabel(label); return this; } /** * Sets the content type. By default, the content type is null. * * @param contentType The content type of this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setContentType(String contentType) { super.setContentType(contentType); return this; } /** * Sets the ETag for this configuration setting. * * @param etag The ETag for the configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setETag(String etag) { super.setETag(etag); return this; } /** * Sets the tags for this configuration setting. * * @param tags The tags to add to this configuration setting. * @return The updated ConfigurationSetting object. */ @Override public SecretReferenceConfigurationSetting setTags(Map<String, String> tags) { super.setTags(tags); return this; } }
We should use HttpResponseException or AzureException (or any other appropriate exception defined in azure-core) instead of using RuntimeException directly.
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
return Mono.error(new RuntimeException("Long running operation failed."));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
We can use the constants defined in HttpMethod type instead of using string literals to compare.
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) {
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
Return Mono.error() instead in all async methods
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
throw logger.logExceptionAsError(new RuntimeException("Cannot get final result"));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
Maybe you can create an enum class or make it a constant.
/**
 * Translates the raw status string returned by the service into a
 * {@link LongRunningOperationStatus}. Invoked by the deserializer when a poll
 * response is received.
 *
 * @param status the status string from the service response
 * @return the modified PollResult instance
 */
public PollResult setStatus(String status) {
    // Comparisons are case-insensitive; the branches are mutually exclusive,
    // so evaluation order does not affect the outcome.
    if ("Succeeded".equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if ("Failed".equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.FAILED;
    } else if ("NotStarted".equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.NOT_STARTED;
    } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.IN_PROGRESS;
    } else {
        // Anything the SDK does not special-case is passed through to fromString.
        this.status = LongRunningOperationStatus.fromString(status, true);
    }
    return this;
}
if ("NotStarted".equalsIgnoreCase(status)) {
/**
 * Maps the status string returned by the service onto a
 * {@link LongRunningOperationStatus}. Called by the deserializer on each poll
 * response.
 *
 * @param status the status string from the service response
 * @return the modified PollResult instance
 */
public PollResult setStatus(String status) {
    // Case-insensitive matching against the well-known status constants; the
    // branches are mutually exclusive, so their order is immaterial.
    if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.FAILED;
    } else if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.NOT_STARTED;
    } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status)
        || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) {
        this.status = LongRunningOperationStatus.IN_PROGRESS;
    } else {
        // Unrecognized statuses fall through to the generic fromString mapping.
        this.status = LongRunningOperationStatus.fromString(status, true);
    }
    return this;
}
class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } }
class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } }
Is there a guarantee that the serialization encoding will always be JSON?
/**
 * Fetches the final result of the long running operation.
 * <p>
 * For PUT/PATCH the original request URL is fetched for the final resource; for POST the
 * 'Location' header value recorded during polling is used. When no final GET URL can be
 * determined from the polling context, the last cached poll response body is deserialized
 * instead.
 *
 * @param pollingContext the polling context holding metadata recorded during polling
 * @param resultType the type the final result should be deserialized into
 * @return a Mono emitting the final result, or an error if the operation failed, was
 * cancelled, or no final result can be determined
 */
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.error(new RuntimeException("Long running operation failed."));
    } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new RuntimeException("Long running operation canceled."));
    }
    String finalGetUrl;
    String httpMethod = pollingContext.getData(HTTP_METHOD);
    if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) {
        finalGetUrl = pollingContext.getData(REQUEST_URL);
    } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) {
        finalGetUrl = pollingContext.getData(LOCATION);
    } else {
        // Fix: surface the failure through the reactive error channel instead of throwing
        // synchronously. Throwing from a Mono-returning method escapes the subscriber's
        // error path; Mono.error keeps the contract consistent with the branches above.
        // The exception is still logged via logExceptionAsError before being emitted.
        return Mono.error(logger.logExceptionAsError(new RuntimeException("Cannot get final result")));
    }
    if (finalGetUrl == null) {
        // No URL to GET: fall back to the poll response body cached during polling.
        String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY);
        if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
            return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody));
        } else {
            // NOTE(review): the payload is assumed to be JSON — confirm the service never
            // returns another encoding here.
            return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(),
                SerializerEncoding.JSON));
        }
    } else {
        HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
        Mono<HttpResponse> responseMono;
        if (context == null) {
            responseMono = httpPipeline.send(request);
        } else {
            responseMono = httpPipeline.send(request, context);
        }
        return responseMono.flatMap(res -> {
            if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                return (Mono<U>) BinaryData.fromFlux(res.getBody());
            } else {
                return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() ->
                    serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON)));
            }
        });
    }
}
SerializerEncoding.JSON));
/**
 * Retrieves the final result of the long running operation.
 * <p>
 * The final GET URL is derived from the HTTP method recorded in the polling context:
 * the original request URL for PUT/PATCH, or the 'Location' value for POST. If no URL
 * is available, the cached poll response body is deserialized instead.
 *
 * @param pollingContext the polling context holding metadata recorded during polling
 * @param resultType the type the final result should be deserialized into
 * @return a Mono emitting the final result, or an error if the operation failed, was
 * cancelled, or no final result can be determined
 */
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    LongRunningOperationStatus latestStatus = pollingContext.getLatestResponse().getStatus();
    if (latestStatus == LongRunningOperationStatus.FAILED) {
        return Mono.error(new AzureException("Long running operation failed."));
    }
    if (latestStatus == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new AzureException("Long running operation cancelled."));
    }
    String method = pollingContext.getData(PollingConstants.HTTP_METHOD);
    String finalGetUrl;
    if (HttpMethod.PUT.name().equalsIgnoreCase(method)
        || HttpMethod.PATCH.name().equalsIgnoreCase(method)) {
        finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
    } else if (HttpMethod.POST.name().equalsIgnoreCase(method)
        && pollingContext.getData(PollingConstants.LOCATION) != null) {
        finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
    } else {
        return Mono.error(new AzureException("Cannot get final result"));
    }
    if (finalGetUrl == null) {
        // No final GET URL: deserialize the poll response body cached during polling.
        BinaryData cachedBody =
            BinaryData.fromString(pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY));
        return PollingUtils.deserializeResponse(cachedBody, serializer, resultType);
    }
    HttpRequest finalGet = new HttpRequest(HttpMethod.GET, finalGetUrl);
    return httpPipeline.send(finalGet)
        .flatMap(HttpResponse::getBodyAsByteArray)
        .map(BinaryData::fromBytes)
        .flatMap(body -> PollingUtils.deserializeResponse(body, serializer, resultType));
}
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
nit: return `Mono.error(...)` here instead of throwing, so the failure is delivered through the reactive error channel rather than thrown synchronously at assembly time.
/**
 * Fetches the final result of the long running operation.
 * <p>
 * The resource location recorded during polling takes precedence as the final GET URL.
 * If absent, the URL is derived from the HTTP method: the original request URL for
 * PUT/PATCH, or the 'Location' header value for POST. When no URL can be determined,
 * the last cached poll response body is deserialized instead.
 *
 * @param pollingContext the polling context holding metadata recorded during polling
 * @param resultType the type the final result should be deserialized into
 * @return a Mono emitting the final result, or an error if the operation failed, was
 * cancelled, or no final result can be determined
 */
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.error(new RuntimeException("Long running operation failed."));
    } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new RuntimeException("Long running operation canceled."));
    }
    String finalGetUrl = pollingContext.getData(RESOURCE_LOCATION);
    if (finalGetUrl == null) {
        String httpMethod = pollingContext.getData(HTTP_METHOD);
        if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(REQUEST_URL);
        } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) {
            finalGetUrl = pollingContext.getData(LOCATION);
        } else {
            // Fix: surface the failure through the reactive error channel instead of throwing
            // synchronously. Throwing from a Mono-returning method escapes the subscriber's
            // error path; Mono.error keeps the contract consistent with the branches above.
            // The exception is still logged via logExceptionAsError before being emitted.
            return Mono.error(logger.logExceptionAsError(new RuntimeException("Cannot get final result")));
        }
    }
    if (finalGetUrl == null) {
        // No URL to GET: fall back to the poll response body cached during polling.
        String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY);
        if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
            return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody));
        } else {
            // NOTE(review): the payload is assumed to be JSON — confirm the service never
            // returns another encoding here.
            return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(),
                SerializerEncoding.JSON));
        }
    } else {
        HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
        Mono<HttpResponse> responseMono;
        if (context == null) {
            responseMono = httpPipeline.send(request);
        } else {
            responseMono = httpPipeline.send(request, context);
        }
        return responseMono.flatMap(res -> {
            if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                return (Mono<U>) BinaryData.fromFlux(res.getBody());
            } else {
                return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() ->
                    serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON)));
            }
        });
    }
}
throw logger.logExceptionAsError(new RuntimeException("Cannot get final result"));
/**
 * Retrieves the final result of the long running operation.
 * <p>
 * The resource location recorded during polling is preferred as the final GET URL;
 * otherwise the URL is derived from the recorded HTTP method (original request URL for
 * PUT/PATCH, 'Location' value for POST). With no URL available, the cached poll response
 * body is deserialized instead.
 *
 * @param pollingContext the polling context holding metadata recorded during polling
 * @param resultType the type the final result should be deserialized into
 * @return a Mono emitting the final result, or an error if the operation failed, was
 * cancelled, or no final result can be determined
 */
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    LongRunningOperationStatus latestStatus = pollingContext.getLatestResponse().getStatus();
    if (latestStatus == LongRunningOperationStatus.FAILED) {
        return Mono.error(new AzureException("Long running operation failed."));
    }
    if (latestStatus == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new AzureException("Long running operation cancelled."));
    }
    String finalGetUrl = pollingContext.getData(PollingConstants.RESOURCE_LOCATION);
    if (finalGetUrl == null) {
        String method = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(method)
            || HttpMethod.PATCH.name().equalsIgnoreCase(method)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(method)
            && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }
    }
    if (finalGetUrl == null) {
        // No final GET URL: deserialize the poll response body cached during polling.
        BinaryData cachedBody =
            BinaryData.fromString(pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY));
        return PollingUtils.deserializeResponse(cachedBody, serializer, resultType);
    }
    return httpPipeline.send(new HttpRequest(HttpMethod.GET, finalGetUrl))
        .flatMap(HttpResponse::getBodyAsByteArray)
        .map(BinaryData::fromBytes)
        .flatMap(body -> PollingUtils.deserializeResponse(body, serializer, resultType));
}
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String OPERATION_LOCATION = "Operation-Location"; private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RESOURCE_LOCATION = "resourceLocation"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final SerializerAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(OperationResourcePollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public OperationResourcePollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } /** * Gets the name of the operation location header. By default it's "Operation-Location". 
* @return the name of the operation location header */ public String getOperationLocationHeaderName() { return OPERATION_LOCATION; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(getOperationLocationHeaderName()); return Mono.just(operationLocationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(getOperationLocationHeaderName()); HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (operationLocationHeader != null) { pollingContext.setData(OPERATION_LOCATION, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { return Mono.error(new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(OPERATION_LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> res.getBodyAsString() .flatMap(body -> Mono.fromCallable(() -> serializer.<PollResult>deserialize(body, PollResult.class, 
SerializerEncoding.JSON)) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(POLL_RESPONSE_BODY, body); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); return Mono.fromCallable(() -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (T) BinaryData.fromString(body); } else { return serializer.deserialize(body, pollResponseType.getJavaType(), SerializerEncoding.JSON); } }).map(pollResponse -> { if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }))); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. */ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. 
* * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if ("NotStarted".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if ("Succeeded".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if ("Failed".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String DEFAULT_OPERATION_LOCATION_HEADER = "Operation-Location"; private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; private final String operationLocationHeaderName; /** * Creates an instance of the operation resource polling strategy using a JSON serializer and "Operation-Location" * as the header for polling. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer(), DEFAULT_OPERATION_LOCATION_HEADER); } /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @param operationLocationHeaderName a custom header for polling the long running operation */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer, String operationLocationHeaderName) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = serializer != null ? serializer : new DefaultJsonSerializer(); this.operationLocationHeaderName = operationLocationHeaderName != null ? 
operationLocationHeaderName : DEFAULT_OPERATION_LOCATION_HEADER; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(operationLocationHeaderName); if (operationLocationHeader != null) { try { new URL(operationLocationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(operationLocationHeaderName); HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (operationLocationHeader != null) { pollingContext.setData(operationLocationHeaderName, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", '%s' header: %s, and response body: %s", response.getStatusCode(), operationLocationHeaderName, operationLocationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(operationLocationHeaderName)); return httpPipeline.send(request).flatMap(response -> response.getBodyAsByteArray() .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse( binaryData, serializer, new TypeReference<PollResult>() { }) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(PollingConstants.RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }))); } @Override /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. 
*/ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status) || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. 
this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
```suggestion return Mono.error(new RuntimeException("Long running operation cancelled.")); ```
/**
 * Retrieves the final result of the long running operation once polling has reached a
 * terminal state.
 *
 * <p>Resolution order for the final GET URL: the resource location captured during polling,
 * then the original request URL for PUT/PATCH, then the Location header for POST. If no URL
 * can be determined from those sources, the last polling response body is deserialized
 * instead.
 *
 * @param pollingContext the context holding data captured from the initial response and
 *     subsequent polls
 * @param resultType the type the final result should be deserialized into
 * @return a {@link Mono} emitting the final result, or an error for failed/cancelled
 *     operations
 */
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    // Terminal failure/cancellation states surface as errors rather than a result.
    if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.error(new RuntimeException("Long running operation failed."));
    } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new RuntimeException("Long running operation canceled."));
    }
    // Prefer the resource location the service advertised during polling.
    String finalGetUrl = pollingContext.getData(RESOURCE_LOCATION);
    if (finalGetUrl == null) {
        // Fall back on HTTP-method conventions: PUT/PATCH re-read the original request URL,
        // POST uses the Location header captured from the initial response.
        String httpMethod = pollingContext.getData(HTTP_METHOD);
        if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(REQUEST_URL);
        } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) {
            finalGetUrl = pollingContext.getData(LOCATION);
        } else {
            throw logger.logExceptionAsError(new RuntimeException("Cannot get final result"));
        }
    }
    if (finalGetUrl == null) {
        // getData may return null for the fallback keys above, so this branch is reachable:
        // deserialize the last polling response body instead of issuing another request.
        String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY);
        if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
            // BinaryData results are wrapped directly without going through the serializer.
            return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody));
        } else {
            return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody,
                resultType.getJavaType(), SerializerEncoding.JSON));
        }
    } else {
        // Issue a GET against the final result URL, threading the optional Context through
        // the pipeline when one was supplied.
        HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
        Mono<HttpResponse> responseMono;
        if (context == null) {
            responseMono = httpPipeline.send(request);
        } else {
            responseMono = httpPipeline.send(request, context);
        }
        return responseMono.flatMap(res -> {
            if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                // Stream the body straight into BinaryData, bypassing the serializer.
                return (Mono<U>) BinaryData.fromFlux(res.getBody());
            } else {
                return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() ->
                    serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON)));
            }
        });
    }
}
return Mono.error(new RuntimeException("Long running operation canceled."));
/**
 * Retrieves the final result of the long running operation once polling has reached a
 * terminal state.
 *
 * <p>Resolution order for the final GET URL: the resource location captured during polling,
 * then the original request URL for PUT/PATCH, then the Location header for POST. If no URL
 * can be determined from those sources, the last polling response body is deserialized
 * instead.
 *
 * @param pollingContext the context holding data captured from the initial response and
 *     subsequent polls
 * @param resultType the type the final result should be deserialized into
 * @return a {@link Mono} emitting the final result, or an {@link AzureException} error for
 *     failed/cancelled operations
 */
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    // Terminal failure/cancellation states surface as errors rather than a result.
    if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.error(new AzureException("Long running operation failed."));
    } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new AzureException("Long running operation cancelled."));
    }
    // Prefer the resource location the service advertised during polling.
    String finalGetUrl = pollingContext.getData(PollingConstants.RESOURCE_LOCATION);
    if (finalGetUrl == null) {
        // Fall back on HTTP-method conventions: PUT/PATCH re-read the original request URL,
        // POST uses the Location header captured from the initial response.
        String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
            && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }
    }
    if (finalGetUrl == null) {
        // getData may return null for the fallback keys above, so this branch is reachable:
        // deserialize the last polling response body instead of issuing another request.
        String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
        return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
    } else {
        // Issue a GET against the final result URL and deserialize the raw bytes.
        HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
        return httpPipeline.send(request)
            .flatMap(HttpResponse::getBodyAsByteArray)
            .map(BinaryData::fromBytes)
            .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
    }
}
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String OPERATION_LOCATION = "Operation-Location"; private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RESOURCE_LOCATION = "resourceLocation"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final SerializerAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(OperationResourcePollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public OperationResourcePollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } /** * Gets the name of the operation location header. By default it's "Operation-Location". 
* @return the name of the operation location header */ public String getOperationLocationHeaderName() { return OPERATION_LOCATION; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(getOperationLocationHeaderName()); return Mono.just(operationLocationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(getOperationLocationHeaderName()); HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (operationLocationHeader != null) { pollingContext.setData(OPERATION_LOCATION, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { return Mono.error(new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(OPERATION_LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> res.getBodyAsString() .flatMap(body -> Mono.fromCallable(() -> serializer.<PollResult>deserialize(body, PollResult.class, 
SerializerEncoding.JSON)) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(POLL_RESPONSE_BODY, body); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); return Mono.fromCallable(() -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (T) BinaryData.fromString(body); } else { return serializer.deserialize(body, pollResponseType.getJavaType(), SerializerEncoding.JSON); } }).map(pollResponse -> { if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }))); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. */ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. 
* * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if ("NotStarted".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if ("Succeeded".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if ("Failed".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String DEFAULT_OPERATION_LOCATION_HEADER = "Operation-Location"; private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; private final String operationLocationHeaderName; /** * Creates an instance of the operation resource polling strategy using a JSON serializer and "Operation-Location" * as the header for polling. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer(), DEFAULT_OPERATION_LOCATION_HEADER); } /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @param operationLocationHeaderName a custom header for polling the long running operation */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer, String operationLocationHeaderName) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = serializer != null ? serializer : new DefaultJsonSerializer(); this.operationLocationHeaderName = operationLocationHeaderName != null ? 
operationLocationHeaderName : DEFAULT_OPERATION_LOCATION_HEADER; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(operationLocationHeaderName); if (operationLocationHeader != null) { try { new URL(operationLocationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(operationLocationHeaderName); HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (operationLocationHeader != null) { pollingContext.setData(operationLocationHeaderName, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", '%s' header: %s, and response body: %s", response.getStatusCode(), operationLocationHeaderName, operationLocationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(operationLocationHeaderName)); return httpPipeline.send(request).flatMap(response -> response.getBodyAsByteArray() .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse( binaryData, serializer, new TypeReference<PollResult>() { }) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(PollingConstants.RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }))); } @Override /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. 
*/ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status) || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. 
this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
Moving to a static final field for now. We can move to an enum or `Constants` class if we need it again in the future.
/**
 * Maps the status string returned by the service onto a {@link LongRunningOperationStatus}
 * and records it on this instance. Well-known states ("NotStarted", "InProgress", "Running",
 * "Succeeded", "Failed") are matched case-insensitively; any other string is handed to
 * {@code LongRunningOperationStatus.fromString} unchanged.
 *
 * @param status the status of the long running operation as returned by the service
 * @return the modified PollResult instance
 */
public PollResult setStatus(String status) {
    final LongRunningOperationStatus parsed;
    if ("NotStarted".equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.NOT_STARTED;
    } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.IN_PROGRESS;
    } else if ("Succeeded".equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if ("Failed".equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.FAILED;
    } else {
        // Second argument mirrors the original call site; presumably it marks the custom
        // status as complete — confirm against LongRunningOperationStatus.fromString docs.
        parsed = LongRunningOperationStatus.fromString(status, true);
    }
    this.status = parsed;
    return this;
}
if ("NotStarted".equalsIgnoreCase(status)) {
/**
 * Maps the status string returned by the service onto a {@link LongRunningOperationStatus}
 * and records it on this instance. Well-known states are matched case-insensitively against
 * the shared {@code PollingConstants} values; any other string is handed to
 * {@code LongRunningOperationStatus.fromString} unchanged.
 *
 * @param status the status of the long running operation as returned by the service
 * @return the modified PollResult instance
 */
public PollResult setStatus(String status) {
    final LongRunningOperationStatus parsed;
    if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.NOT_STARTED;
    } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status)
        || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.IN_PROGRESS;
    } else if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) {
        parsed = LongRunningOperationStatus.FAILED;
    } else {
        // Second argument mirrors the original call site; presumably it marks the custom
        // status as complete — confirm against LongRunningOperationStatus.fromString docs.
        parsed = LongRunningOperationStatus.fromString(status, true);
    }
    this.status = parsed;
    return this;
}
class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } }
class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } }
I think I got it now. Thanks
/**
 * Applies the session token to the request and then adds the intended collection rid to the
 * same request.
 *
 * <p>Note: {@code addIntendedCollectionRid(request)} is invoked eagerly while assembling the
 * chain, but per Reactor's {@code then} semantics its Mono is only subscribed after
 * {@code applySessionToken} completes — confirm the helpers are lazy if strict ordering of
 * side effects matters.
 *
 * @param request the service request to decorate
 * @return a {@link Mono} that completes once both decorations have been applied
 */
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); }
return applySessionToken(request).then(addIntendedCollectionRid(request));
/**
 * Applies the session token to the request and then adds the intended collection rid to the
 * same request.
 *
 * <p>Note: {@code addIntendedCollectionRid(request)} is invoked eagerly while assembling the
 * chain, but per Reactor's {@code then} semantics its Mono is only subscribed after
 * {@code applySessionToken} completes — confirm the helpers are lazy if strict ordering of
 * side effects matters.
 *
 * @param request the service request to decorate
 * @return a {@link Mono} that completes once both decorations have been applied
 */
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; 
this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> 
query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = 
PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if 
(request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, 
request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () 
-> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } 
this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if 
(!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). 
flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); }); }); } else { String 
sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; 
this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = 
PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if 
(request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, 
request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () 
-> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } 
this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if 
(!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). 
flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String 
sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
Using AzureException when there's no response body
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
return Mono.error(new RuntimeException("Long running operation failed."));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
Fixed.
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) {
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
Fixed.
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
throw logger.logExceptionAsError(new RuntimeException("Cannot get final result"));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
Moved to `implementation/PollingConstants.java`
public PollResult setStatus(String status) { if ("NotStarted".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if ("Succeeded".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if ("Failed".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; }
if ("NotStarted".equalsIgnoreCase(status)) {
public PollResult setStatus(String status) { if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status) || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; }
class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } }
class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } }
fixed
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl = pollingContext.getData(RESOURCE_LOCATION); if (finalGetUrl == null) { String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
throw logger.logExceptionAsError(new RuntimeException("Cannot get final result"));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl = pollingContext.getData(PollingConstants.RESOURCE_LOCATION); if (finalGetUrl == null) { String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String OPERATION_LOCATION = "Operation-Location"; private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RESOURCE_LOCATION = "resourceLocation"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final SerializerAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(OperationResourcePollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public OperationResourcePollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } /** * Gets the name of the operation location header. By default it's "Operation-Location". 
* @return the name of the operation location header */ public String getOperationLocationHeaderName() { return OPERATION_LOCATION; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(getOperationLocationHeaderName()); return Mono.just(operationLocationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(getOperationLocationHeaderName()); HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (operationLocationHeader != null) { pollingContext.setData(OPERATION_LOCATION, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { return Mono.error(new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(OPERATION_LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> res.getBodyAsString() .flatMap(body -> Mono.fromCallable(() -> serializer.<PollResult>deserialize(body, PollResult.class, 
SerializerEncoding.JSON)) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(POLL_RESPONSE_BODY, body); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); return Mono.fromCallable(() -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (T) BinaryData.fromString(body); } else { return serializer.deserialize(body, pollResponseType.getJavaType(), SerializerEncoding.JSON); } }).map(pollResponse -> { if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }))); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. */ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. 
* * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if ("NotStarted".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if ("Succeeded".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if ("Failed".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String DEFAULT_OPERATION_LOCATION_HEADER = "Operation-Location"; private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; private final String operationLocationHeaderName; /** * Creates an instance of the operation resource polling strategy using a JSON serializer and "Operation-Location" * as the header for polling. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer(), DEFAULT_OPERATION_LOCATION_HEADER); } /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @param operationLocationHeaderName a custom header for polling the long running operation */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer, String operationLocationHeaderName) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = serializer != null ? serializer : new DefaultJsonSerializer(); this.operationLocationHeaderName = operationLocationHeaderName != null ? 
operationLocationHeaderName : DEFAULT_OPERATION_LOCATION_HEADER; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(operationLocationHeaderName); if (operationLocationHeader != null) { try { new URL(operationLocationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(operationLocationHeaderName); HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (operationLocationHeader != null) { pollingContext.setData(operationLocationHeaderName, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", '%s' header: %s, and response body: %s", response.getStatusCode(), operationLocationHeaderName, operationLocationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(operationLocationHeaderName)); return httpPipeline.send(request).flatMap(response -> response.getBodyAsByteArray() .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse( binaryData, serializer, new TypeReference<PollResult>() { }) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(PollingConstants.RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }))); } @Override /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. 
*/ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status) || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. 
this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
fixed.
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl = pollingContext.getData(RESOURCE_LOCATION); if (finalGetUrl == null) { String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
return Mono.error(new RuntimeException("Long running operation canceled."));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl = pollingContext.getData(PollingConstants.RESOURCE_LOCATION); if (finalGetUrl == null) { String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String OPERATION_LOCATION = "Operation-Location"; private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RESOURCE_LOCATION = "resourceLocation"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final SerializerAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(OperationResourcePollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public OperationResourcePollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } /** * Gets the name of the operation location header. By default it's "Operation-Location". 
* @return the name of the operation location header */ public String getOperationLocationHeaderName() { return OPERATION_LOCATION; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(getOperationLocationHeaderName()); return Mono.just(operationLocationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(getOperationLocationHeaderName()); HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (operationLocationHeader != null) { pollingContext.setData(OPERATION_LOCATION, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { return Mono.error(new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(OPERATION_LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> res.getBodyAsString() .flatMap(body -> Mono.fromCallable(() -> serializer.<PollResult>deserialize(body, PollResult.class, 
SerializerEncoding.JSON)) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(POLL_RESPONSE_BODY, body); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); return Mono.fromCallable(() -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (T) BinaryData.fromString(body); } else { return serializer.deserialize(body, pollResponseType.getJavaType(), SerializerEncoding.JSON); } }).map(pollResponse -> { if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }))); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. */ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. 
* * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if ("NotStarted".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if ("InProgress".equalsIgnoreCase(status) || "Running".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if ("Succeeded".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if ("Failed".equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
class OperationResourcePollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String DEFAULT_OPERATION_LOCATION_HEADER = "Operation-Location"; private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; private final String operationLocationHeaderName; /** * Creates an instance of the operation resource polling strategy using a JSON serializer and "Operation-Location" * as the header for polling. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer(), DEFAULT_OPERATION_LOCATION_HEADER); } /** * Creates an instance of the operation resource polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses * @param operationLocationHeaderName a custom header for polling the long running operation */ public OperationResourcePollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer, String operationLocationHeaderName) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = serializer != null ? serializer : new DefaultJsonSerializer(); this.operationLocationHeaderName = operationLocationHeaderName != null ? 
operationLocationHeaderName : DEFAULT_OPERATION_LOCATION_HEADER; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader operationLocationHeader = initialResponse.getHeaders().get(operationLocationHeaderName); if (operationLocationHeader != null) { try { new URL(operationLocationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader operationLocationHeader = response.getHeaders().get(operationLocationHeaderName); HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (operationLocationHeader != null) { pollingContext.setData(operationLocationHeaderName, operationLocationHeader.getValue()); } if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", '%s' header: %s, and response body: %s", response.getStatusCode(), operationLocationHeaderName, operationLocationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(operationLocationHeaderName)); return httpPipeline.send(request).flatMap(response -> response.getBodyAsByteArray() .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse( binaryData, serializer, new TypeReference<PollResult>() { }) .map(pollResult -> { if (pollResult.getResourceLocation() != null) { pollingContext.setData(PollingConstants.RESOURCE_LOCATION, pollResult.getResourceLocation()); } pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); return pollResult.getStatus(); }) .flatMap(status -> { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }))); } @Override /** * A simple structure representing the partial response received from an operation location URL, containing the * information of the status of the long running operation. 
*/ private static class PollResult { private LongRunningOperationStatus status; private String resourceLocation; /** * Gets the status of the long running operation. * @return the status represented as a {@link LongRunningOperationStatus} */ public LongRunningOperationStatus getStatus() { return status; } /** * Sets the long running operation status in the format of a string returned by the service. This is called by * the deserializer when a response is received. * * @param status the status of the long running operation * @return the modified PollResult instance */ @JsonSetter public PollResult setStatus(String status) { if (PollingConstants.STATUS_NOT_STARTED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.NOT_STARTED; } else if (PollingConstants.STATUS_IN_PROGRESS.equalsIgnoreCase(status) || PollingConstants.STATUS_RUNNING.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.IN_PROGRESS; } else if (PollingConstants.STATUS_SUCCEEDED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else if (PollingConstants.STATUS_FAILED.equalsIgnoreCase(status)) { this.status = LongRunningOperationStatus.FAILED; } else { this.status = LongRunningOperationStatus.fromString(status, true); } return this; } /** * Sets the long running operation status in the format of the {@link LongRunningOperationStatus} enum. * * @param status the status of the long running operation * @return the modified PollResult instance */ public PollResult setStatus(LongRunningOperationStatus status) { this.status = status; return this; } /** * Gets the resource location URL to get the final result. This is often available in the response when the * long running operation has been successfully completed. * * @return the resource location URL to get he final result */ public String getResourceLocation() { return resourceLocation; } /** * Sets the resource location URL. 
this should only be called by the deserializer when a response is received. * * @param resourceLocation the resource location URL * @return the modified PollResult instance */ public PollResult setResourceLocation(String resourceLocation) { this.resourceLocation = resourceLocation; return this; } } }
Not a blocking issue. You might want to double check whether the header is indeed an URI. We had issue of invalid `Azure-AsyncOperation` header (which is defined for MS internal use). https://github.com/Azure/azure-sdk-for-java/issues/22032#issuecomment-855588700 And `Location` header would be more likely to be used for things other than LRO (redirection, etc.).
public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); return Mono.just(locationHeader != null); }
}
public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override @SuppressWarnings("unchecked") @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS).flatMap(status -> { if (response.getValue() == null) { return Mono.just(new PollResponse<>(status, null)); } else if (TypeUtil.isTypeOrSubTypeOf( response.getValue().getClass(), pollResponseType.getJavaType())) { return Mono.just(new PollResponse<>(status, (T) response.getValue())); } else { Mono<BinaryData> binaryDataMono; if (response.getValue() instanceof BinaryData) { binaryDataMono = Mono.just((BinaryData) response.getValue()); } else { binaryDataMono = BinaryData.fromObjectAsync(response.getValue()); } if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return 
binaryDataMono.map(binaryData -> new PollResponse<>(status, (T) binaryData)); } else { return binaryDataMono.flatMap(binaryData -> binaryData.toObjectAsync(pollResponseType)) .map(value -> new PollResponse<>(status, value)); } } }); } else { return Mono.error(new AzureException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request, context).flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return res.getBodyAsString().map(BinaryData::fromString).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(PollingConstants.RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == 
LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> SERIALIZER.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request, context).flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsByteArray().flatMap(body -> Mono.fromCallable(() -> SERIALIZER.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } } @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } } }
This `IllegalArgumentException` needs to be documented with an `@throws` tag in the method's Javadoc.
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or null if the length is unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null.
 * @throws IllegalArgumentException If {@code length} is non-null and less than 0.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    Objects.requireNonNull(data, "'content' cannot be null.");
    // A null length means "unknown" and is valid; only a present, negative length is rejected.
    if (length != null && length < 0) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'length' cannot be less than 0."));
    }
    return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'length' cannot be less than 0."));
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 * <p>
 * Argument problems are reported as error signals on the returned {@link Mono} rather than thrown
 * synchronously, as is conventional for reactive factory methods.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or null if the length is unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null (emitted through the returned {@link Mono}).
 * @throws IllegalArgumentException If {@code length} is non-null and less than 0 (emitted through the
 * returned {@link Mono}).
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    // FIX: the original compared "length < 0" directly, which auto-unboxes the Long and throws
    // NullPointerException when length is null. A null length means "unknown" and must be accepted,
    // matching the overload that forwards null.
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }
    return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}
/**
 * BinaryData is an abstraction over several concrete representations of binary content (byte arrays,
 * Strings, InputStreams, Flux&lt;ByteBuffer&gt;, files, and serializable objects). All construction and
 * conversion methods delegate to an underlying {@link BinaryDataContent} implementation.
 */
class BinaryData {
    private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);

    // Default per-read size, in bytes, used by fromFile(Path).
    // NOTE(review): 8092 looks like a typo for 8192 (8 KiB) — confirm the intended value.
    private static final int CHUNK_SIZE = 8092;

    // Shared JSON serializer used by the fromObject/toObject overloads that take no explicit serializer.
    static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);

    // The underlying content representation that every conversion delegates to.
    private final BinaryDataContent content;

    /**
     * Creates a BinaryData wrapping the given content.
     *
     * @param content The content this BinaryData represents; must not be null.
     */
    BinaryData(BinaryDataContent content) {
        this.content = Objects.requireNonNull(content, "'content' cannot be null.");
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     */
    public static BinaryData fromStream(InputStream inputStream) {
        return new BinaryData(new InputStreamContent(inputStream));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}, deferring the work
     * until the returned {@link Mono} is subscribed to.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
        return Mono.fromCallable(() -> fromStream(inputStream));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer},
     * with an unknown content length.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
        return fromFlux(data, null);
    }

    /*
     * NOTE(review): the Javadoc that followed here in the original described a
     * fromFlux(Flux<ByteBuffer> data, Long length) overload, but the overload's implementation is not
     * present at this point in the file (the single-argument overload above delegates to it). Confirm
     * that the two-argument method was not accidentally dropped.
     */

    /**
     * Creates an instance of {@link BinaryData} from the given {@link String}. The String is converted to
     * bytes using UTF-8.
     *
     * @param data The {@link String} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link String}.
     */
    public static BinaryData fromString(String data) {
        return new BinaryData(new StringContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given byte array.
     *
     * @param data The byte array that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the byte array.
     */
    public static BinaryData fromBytes(byte[] data) {
        return new BinaryData(new ByteArrayContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
     * {@link JsonSerializer} ({@link #SERIALIZER}).
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data) {
        return fromObject(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
     * {@link JsonSerializer}, deferring the work until subscription.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data) {
        return fromObjectAsync(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
     * {@link ObjectSerializer}.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null and {@code data} is not null.
     * @see ObjectSerializer
     */
    public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
        return new BinaryData(new SerializableContent(data, serializer));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
     * {@link ObjectSerializer}, deferring the work until subscription.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null and {@code data} is not null.
     * @see ObjectSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
        return Mono.fromCallable(() -> fromObject(data, serializer));
    }

    /**
     * Creates a {@link BinaryData} that uses the {@link Path} as its data, read with the default chunk
     * size ({@link #CHUNK_SIZE}).
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     */
    public static BinaryData fromFile(Path file) {
        return fromFile(file, CHUNK_SIZE);
    }

    /**
     * Creates a {@link BinaryData} that uses the {@link Path} as its data.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @param chunkSize The requested size for each read of the path.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to 0.
     */
    public static BinaryData fromFile(Path file, int chunkSize) {
        Objects.requireNonNull(file, "'file' cannot be null.");
        if (chunkSize <= 0) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "'chunkSize' cannot be less than or equal to 0."));
        }
        return new BinaryData(new FileContent(file, chunkSize));
    }

    /**
     * Returns a byte array representation of this {@link BinaryData}.
     *
     * @return A byte array representing this {@link BinaryData}.
     */
    public byte[] toBytes() {
        return content.toBytes();
    }

    /**
     * Returns a {@link String} representation of this {@link BinaryData}, converting its data using the
     * UTF-8 character set.
     *
     * @return A {@link String} representing this {@link BinaryData}.
     */
    // NOTE(review): this overrides Object.toString() but is missing the @Override annotation.
    public String toString() {
        return content.toString();
    }

    /**
     * Deserializes this {@link BinaryData} into an object of the given non-generic class using the
     * default {@link JsonSerializer}.
     *
     * @param <T> Type of the deserialized Object.
     * @param clazz The {@link Class} representing the Object's type.
     * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} is null.
     */
    public <T> T toObject(Class<T> clazz) {
        return toObject(TypeReference.createInstance(clazz), SERIALIZER);
    }

    /**
     * Deserializes this {@link BinaryData} into an object of the given (possibly generic) type using
     * the default {@link JsonSerializer}.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     */
    public <T> T toObject(TypeReference<T> typeReference) {
        return toObject(typeReference, SERIALIZER);
    }

    /**
     * Deserializes this {@link BinaryData} into an object of the given non-generic class using the
     * passed {@link ObjectSerializer}.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     */
    public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
        return toObject(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Deserializes this {@link BinaryData} into an object of the given (possibly generic) type using
     * the passed {@link ObjectSerializer}. This is the overload all other toObject variants delegate to.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
     */
    public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
        Objects.requireNonNull(typeReference, "'typeReference' cannot be null.");
        Objects.requireNonNull(serializer, "'serializer' cannot be null.");
        return content.toObject(typeReference, serializer);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into an object of the given non-generic class
     * using the default {@link JsonSerializer}.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} is null.
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz) {
        return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into an object of the given (possibly generic)
     * type using the default {@link JsonSerializer}.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
        return toObjectAsync(typeReference, SERIALIZER);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into an object of the given non-generic class
     * using the passed {@link ObjectSerializer}.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
        return toObjectAsync(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into an object of the given (possibly generic)
     * type using the passed {@link ObjectSerializer}, deferring the work until subscription.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
        return Mono.fromCallable(() -> toObject(typeReference, serializer));
    }

    /**
     * Returns an {@link InputStream} representation of this {@link BinaryData}.
     *
     * @return An {@link InputStream} representing the {@link BinaryData}.
     */
    public InputStream toStream() {
        return content.toStream();
    }

    /**
     * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. Attempting to
     * mutate the returned buffer throws {@link ReadOnlyBufferException}.
     *
     * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
     */
    public ByteBuffer toByteBuffer() {
        return content.toByteBuffer();
    }

    /**
     * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
     *
     * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
     */
    public Flux<ByteBuffer> toFluxByteBuffer() {
        return content.toFluxByteBuffer();
    }

    /**
     * Returns the length of the content, if it is known. The length can be {@code null} if the source did
     * not specify the length or the length cannot be determined without reading the whole content.
     *
     * @return the length of the content, if it is known.
     */
    public Long getLength() {
        return content.getLength();
    }
}
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data))); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. 
* <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array * outside of this instance will result in the contents of this BinaryData instance being updated as well. To * safely update the byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
*</p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. 
* @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile} * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended * to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. 
*/ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. 
* @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. 
* @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. 
So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. 
The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
Added support for serializer config.
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new RuntimeException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new RuntimeException("Long running operation canceled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(HTTP_METHOD); if ("PUT".equalsIgnoreCase(httpMethod) || "PATCH".equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(REQUEST_URL); } else if ("POST".equalsIgnoreCase(httpMethod) && pollingContext.getData(LOCATION) != null) { finalGetUrl = pollingContext.getData(LOCATION); } else { throw logger.logExceptionAsError(new RuntimeException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(POLL_RESPONSE_BODY); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody)); } else { return Mono.fromCallable(() -> serializer.deserialize(latestResponseBody, resultType.getJavaType(), SerializerEncoding.JSON)); } } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) { return (Mono<U>) BinaryData.fromFlux(res.getBody()); } else { return res.getBodyAsString().flatMap(body -> Mono.fromCallable(() -> serializer.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON))); } }); } }
SerializerEncoding.JSON));
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private static final String LOCATION = "Location"; private static final String REQUEST_URL = "requestURL"; private static final String HTTP_METHOD = "httpMethod"; private static final String RETRY_AFTER = "Retry-After"; private static final String POLL_RESPONSE_BODY = "pollResponseBody"; private final JacksonAdapter serializer = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class); private final HttpPipeline httpPipeline; private final Context context; /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param context additional metadata to pass along with the request */ public LocationPollingStrategy( HttpPipeline httpPipeline, Context context) { this.httpPipeline = httpPipeline; this.context = context; } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(LOCATION); return Mono.just(locationHeader != null); } @SuppressWarnings("unchecked") @Override public Mono<LongRunningOperationStatus> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } pollingContext.setData(HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { return Mono.just(LongRunningOperationStatus.IN_PROGRESS); } else { throw logger.logExceptionAsError( new RuntimeException("Operation failed or cancelled: " + response.getStatusCode())); } } @SuppressWarnings("unchecked") 
@Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(LOCATION)); Mono<HttpResponse> responseMono; if (context == null) { responseMono = httpPipeline.send(request); } else { responseMono = httpPipeline.send(request, context); } return responseMono.flatMap(res -> { HttpHeader locationHeader = res.getHeaders().get(LOCATION); if (locationHeader != null) { pollingContext.setData(LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (res.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return BinaryData.fromFlux(res.getBody()).flatMap(binaryData -> { pollingContext.setData(POLL_RESPONSE_BODY, binaryData.toString()); if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) { return (Mono<T>) Mono.just(binaryData); } else { return binaryData.toObjectAsync(pollResponseType); } }).map(pollResponse -> { String retryAfter = res.getHeaderValue(RETRY_AFTER); if (retryAfter != null) { return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter))); } else { return new PollResponse<>(status, pollResponse); } }); }); } @SuppressWarnings("unchecked") @Override @Override public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) { return Mono.error(new IllegalStateException("Cancellation is not supported.")); } }
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override public Mono<Boolean> canPoll(Response<?> initialResponse) { HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { try { new URL(locationHeader.getValue()); return Mono.just(true); } catch (MalformedURLException e) { return Mono.just(false); } } return Mono.just(false); } @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = 
response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override }
> And the Location header is more likely to be used for things other than LRO (redirection, etc.). I think having it close to the end of the chain will reduce the chance of misusing it for something other than its original intention. If an API is labeled `x-ms-long-running-operation`, returns a `Location` header for purposes other than LRO, and handles the LRO some other way, a custom polling strategy is probably needed for it.
/**
 * Determines whether this strategy can poll the given activation response.
 * Polling is possible only when the service returned a 'Location' header.
 */
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    return Mono.just(initialResponse.getHeaders().get(PollingConstants.LOCATION) != null);
}
}
/**
 * Determines whether this strategy can poll the given activation response.
 * Polling is possible only when the 'Location' header is present and its value
 * parses as a well-formed URL.
 */
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader == null) {
        return Mono.just(false);
    }
    try {
        new URL(locationHeader.getValue());
        return Mono.just(true);
    } catch (MalformedURLException e) {
        // Not a usable polling URL; let another strategy handle the response.
        return Mono.just(false);
    }
}
/**
 * A polling strategy that polls the URL from the 'Location' response header until the
 * long-running operation completes.
 *
 * <p>FIX: the original contained a duplicate {@code @Override} annotation on the method after
 * the constructor (a compile error) where {@code canPoll} was evidently dropped; {@code canPoll}
 * is restored here consistent with the sibling variants of this class.
 * NOTE(review): confirm the restored {@code canPoll} matches the intended behavior.</p>
 *
 * @param <T> the type of the intermediate poll response
 * @param <U> the type of the final result
 */
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
    private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter();

    private final HttpPipeline httpPipeline;
    private final Context context;

    /**
     * Creates an instance of the location polling strategy.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     * @param context additional metadata to pass along with the request
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline, Context context) {
        this.httpPipeline = httpPipeline;
        this.context = context;
    }

    /**
     * This strategy applies only when the activation response carries a 'Location' header.
     */
    @Override
    public Mono<Boolean> canPoll(Response<?> initialResponse) {
        HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
        return Mono.just(locationHeader != null);
    }

    /**
     * Records the polling URL, HTTP method and request URL in the polling context, then converts
     * the activation response value into an IN_PROGRESS poll response. Any status other than
     * 200/201/202/204 surfaces as an {@link AzureException}.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
            TypeReference<T> pollResponseType) {
        HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
        }
        pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
        pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());

        if (response.getStatusCode() == 200 || response.getStatusCode() == 201
                || response.getStatusCode() == 202 || response.getStatusCode() == 204) {
            return Mono.just(LongRunningOperationStatus.IN_PROGRESS).flatMap(status -> {
                if (response.getValue() == null) {
                    return Mono.just(new PollResponse<>(status, null));
                } else if (TypeUtil.isTypeOrSubTypeOf(
                        response.getValue().getClass(), pollResponseType.getJavaType())) {
                    // The response value is already assignable to the requested type.
                    return Mono.just(new PollResponse<>(status, (T) response.getValue()));
                } else {
                    Mono<BinaryData> binaryDataMono;
                    if (response.getValue() instanceof BinaryData) {
                        binaryDataMono = Mono.just((BinaryData) response.getValue());
                    } else {
                        binaryDataMono = BinaryData.fromObjectAsync(response.getValue());
                    }
                    if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) {
                        return binaryDataMono.map(binaryData -> new PollResponse<>(status, (T) binaryData));
                    } else {
                        return binaryDataMono.flatMap(binaryData -> binaryData.toObjectAsync(pollResponseType))
                            .map(value -> new PollResponse<>(status, value));
                    }
                }
            });
        } else {
            return Mono.error(new AzureException("Operation failed or cancelled: " + response.getStatusCode()));
        }
    }

    /**
     * Issues a GET to the stored 'Location' URL and maps the status code to an LRO status:
     * 202 means still in progress, any other 2xx (200-204) means completed, anything else failed.
     * The raw response body is cached in the polling context for later result retrieval.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
        return httpPipeline.send(request, context).flatMap(res -> {
            // The service may rotate the polling URL on every poll; honor the latest header.
            HttpHeader locationHeader = res.getHeaders().get(PollingConstants.LOCATION);
            if (locationHeader != null) {
                pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
            }

            LongRunningOperationStatus status;
            if (res.getStatusCode() == 202) {
                status = LongRunningOperationStatus.IN_PROGRESS;
            } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) {
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            } else {
                status = LongRunningOperationStatus.FAILED;
            }

            return res.getBodyAsString().map(BinaryData::fromString).flatMap(binaryData -> {
                pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
                if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) {
                    return (Mono<T>) Mono.just(binaryData);
                } else {
                    return binaryData.toObjectAsync(pollResponseType);
                }
            }).map(pollResponse -> {
                String retryAfter = res.getHeaderValue(PollingConstants.RETRY_AFTER);
                if (retryAfter != null) {
                    return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter)));
                } else {
                    return new PollResponse<>(status, pollResponse);
                }
            });
        });
    }

    /**
     * Fetches the final result: PUT/PATCH re-GET the original request URL, POST follows the
     * 'Location' URL; with no final URL, the cached poll response body is deserialized instead.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.error(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            return Mono.error(new AzureException("Long running operation cancelled."));
        }

        String finalGetUrl;
        String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
                || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
                && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }

        if (finalGetUrl == null) {
            String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
            if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody));
            } else {
                return Mono.fromCallable(() -> SERIALIZER.deserialize(latestResponseBody,
                    resultType.getJavaType(), SerializerEncoding.JSON));
            }
        } else {
            HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
            return httpPipeline.send(request, context).flatMap(res -> {
                if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                    return (Mono<U>) BinaryData.fromFlux(res.getBody());
                } else {
                    return res.getBodyAsByteArray().flatMap(body -> Mono.fromCallable(() ->
                        SERIALIZER.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON)));
                }
            });
        }
    }

    /**
     * Cancellation is not supported by the 'Location' polling protocol.
     */
    @Override
    public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
        return Mono.error(new IllegalStateException("Cancellation is not supported."));
    }
}
/**
 * A polling strategy that polls the URL from the 'Location' response header until the
 * long-running operation completes.
 *
 * <p>FIX: the original declared {@code @Override} twice before {@code onInitialResponse}
 * (a compile error) where {@code canPoll} was evidently dropped; {@code canPoll} is restored
 * here consistent with the sibling variant of this class (URL-validating check). Also fixed the
 * doubled comma produced by the failure-message format string concatenation.
 * NOTE(review): confirm the restored {@code canPoll} matches the intended behavior.</p>
 *
 * @param <T> the type of the intermediate poll response
 * @param <U> the type of the final result
 */
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
    private final HttpPipeline httpPipeline;
    private final ObjectSerializer serializer;

    /**
     * Creates an instance of the location polling strategy using a JSON serializer.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline) {
        this(httpPipeline, new DefaultJsonSerializer());
    }

    /**
     * Creates an instance of the location polling strategy.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     * @param serializer a custom serializer for serializing and deserializing polling responses
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
        this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
        this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
    }

    /**
     * Polling requires a 'Location' header whose value parses as a well-formed URL.
     */
    @Override
    public Mono<Boolean> canPoll(Response<?> initialResponse) {
        HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            try {
                new URL(locationHeader.getValue());
                return Mono.just(true);
            } catch (MalformedURLException e) {
                return Mono.just(false);
            }
        }
        return Mono.just(false);
    }

    /**
     * Records the polling URL, HTTP method and request URL in the polling context, then converts
     * the activation response body (honoring 'Retry-After') into an IN_PROGRESS poll response.
     * Any status other than 200/201/202/204 surfaces as an {@link AzureException}.
     */
    @Override
    public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
            TypeReference<T> pollResponseType) {
        HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
        }
        pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
        pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());

        if (response.getStatusCode() == 200 || response.getStatusCode() == 201
                || response.getStatusCode() == 202 || response.getStatusCode() == 204) {
            String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
            Duration retryAfter = retryAfterValue == null
                ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
            return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
                .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
                .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
                    LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
        } else {
            return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
                + " 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
                PollingUtils.serializeResponse(response.getValue(), serializer))));
        }
    }

    /**
     * Issues a GET to the stored 'Location' URL and maps the status code to an LRO status:
     * 202 means still in progress, any other 2xx (200-204) means completed, anything else failed.
     * The raw response body is cached in the polling context for later result retrieval.
     */
    @Override
    public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
        return httpPipeline.send(request).flatMap(response -> {
            // The service may rotate the polling URL on every poll; honor the latest header.
            HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
            if (locationHeader != null) {
                pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
            }

            LongRunningOperationStatus status;
            if (response.getStatusCode() == 202) {
                status = LongRunningOperationStatus.IN_PROGRESS;
            } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            } else {
                status = LongRunningOperationStatus.FAILED;
            }

            return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
                pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
                String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
                Duration retryAfter = retryAfterValue == null
                    ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
                return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
                    .map(value -> new PollResponse<>(status, value, retryAfter));
            });
        });
    }

    /**
     * Fetches the final result: PUT/PATCH re-GET the original request URL, POST follows the
     * 'Location' URL; with no final URL, the cached poll response body is deserialized instead.
     */
    @Override
    public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.error(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            return Mono.error(new AzureException("Long running operation cancelled."));
        }

        String finalGetUrl;
        String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
                || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
                && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }

        if (finalGetUrl == null) {
            String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
            return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
        } else {
            HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
            return httpPipeline.send(request)
                .flatMap(HttpResponse::getBodyAsByteArray)
                .map(BinaryData::fromBytes)
                .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
        }
    }
}
I also added a check of the header value — if it's not a valid URL, `canPoll` will return false.
/**
 * Determines whether this strategy can poll the given activation response.
 * Polling is possible only when the service returned a 'Location' header.
 */
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    return Mono.just(initialResponse.getHeaders().get(PollingConstants.LOCATION) != null);
}
}
/**
 * Determines whether this strategy can poll the given activation response.
 * Polling is possible only when the 'Location' header is present and its value
 * parses as a well-formed URL.
 */
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader == null) {
        return Mono.just(false);
    }
    try {
        new URL(locationHeader.getValue());
        return Mono.just(true);
    } catch (MalformedURLException e) {
        // Not a usable polling URL; let another strategy handle the response.
        return Mono.just(false);
    }
}
/**
 * A polling strategy that polls the URL from the 'Location' response header until the
 * long-running operation completes.
 *
 * <p>FIX: the original contained a duplicate {@code @Override} annotation on the method after
 * the constructor (a compile error) where {@code canPoll} was evidently dropped; {@code canPoll}
 * is restored here consistent with the sibling variants of this class.
 * NOTE(review): confirm the restored {@code canPoll} matches the intended behavior.</p>
 *
 * @param <T> the type of the intermediate poll response
 * @param <U> the type of the final result
 */
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
    private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter();

    private final HttpPipeline httpPipeline;
    private final Context context;

    /**
     * Creates an instance of the location polling strategy.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     * @param context additional metadata to pass along with the request
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline, Context context) {
        this.httpPipeline = httpPipeline;
        this.context = context;
    }

    /**
     * This strategy applies only when the activation response carries a 'Location' header.
     */
    @Override
    public Mono<Boolean> canPoll(Response<?> initialResponse) {
        HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
        return Mono.just(locationHeader != null);
    }

    /**
     * Records the polling URL, HTTP method and request URL in the polling context, then converts
     * the activation response value into an IN_PROGRESS poll response. Any status other than
     * 200/201/202/204 surfaces as an {@link AzureException}.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
            TypeReference<T> pollResponseType) {
        HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
        }
        pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
        pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());

        if (response.getStatusCode() == 200 || response.getStatusCode() == 201
                || response.getStatusCode() == 202 || response.getStatusCode() == 204) {
            return Mono.just(LongRunningOperationStatus.IN_PROGRESS).flatMap(status -> {
                if (response.getValue() == null) {
                    return Mono.just(new PollResponse<>(status, null));
                } else if (TypeUtil.isTypeOrSubTypeOf(
                        response.getValue().getClass(), pollResponseType.getJavaType())) {
                    // The response value is already assignable to the requested type.
                    return Mono.just(new PollResponse<>(status, (T) response.getValue()));
                } else {
                    Mono<BinaryData> binaryDataMono;
                    if (response.getValue() instanceof BinaryData) {
                        binaryDataMono = Mono.just((BinaryData) response.getValue());
                    } else {
                        binaryDataMono = BinaryData.fromObjectAsync(response.getValue());
                    }
                    if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) {
                        return binaryDataMono.map(binaryData -> new PollResponse<>(status, (T) binaryData));
                    } else {
                        return binaryDataMono.flatMap(binaryData -> binaryData.toObjectAsync(pollResponseType))
                            .map(value -> new PollResponse<>(status, value));
                    }
                }
            });
        } else {
            return Mono.error(new AzureException("Operation failed or cancelled: " + response.getStatusCode()));
        }
    }

    /**
     * Issues a GET to the stored 'Location' URL and maps the status code to an LRO status:
     * 202 means still in progress, any other 2xx (200-204) means completed, anything else failed.
     * The raw response body is cached in the polling context for later result retrieval.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
        return httpPipeline.send(request, context).flatMap(res -> {
            // The service may rotate the polling URL on every poll; honor the latest header.
            HttpHeader locationHeader = res.getHeaders().get(PollingConstants.LOCATION);
            if (locationHeader != null) {
                pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
            }

            LongRunningOperationStatus status;
            if (res.getStatusCode() == 202) {
                status = LongRunningOperationStatus.IN_PROGRESS;
            } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) {
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            } else {
                status = LongRunningOperationStatus.FAILED;
            }

            return res.getBodyAsString().map(BinaryData::fromString).flatMap(binaryData -> {
                pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
                if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) {
                    return (Mono<T>) Mono.just(binaryData);
                } else {
                    return binaryData.toObjectAsync(pollResponseType);
                }
            }).map(pollResponse -> {
                String retryAfter = res.getHeaderValue(PollingConstants.RETRY_AFTER);
                if (retryAfter != null) {
                    return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter)));
                } else {
                    return new PollResponse<>(status, pollResponse);
                }
            });
        });
    }

    /**
     * Fetches the final result: PUT/PATCH re-GET the original request URL, POST follows the
     * 'Location' URL; with no final URL, the cached poll response body is deserialized instead.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.error(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            return Mono.error(new AzureException("Long running operation cancelled."));
        }

        String finalGetUrl;
        String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
                || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
                && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }

        if (finalGetUrl == null) {
            String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
            if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody));
            } else {
                return Mono.fromCallable(() -> SERIALIZER.deserialize(latestResponseBody,
                    resultType.getJavaType(), SerializerEncoding.JSON));
            }
        } else {
            HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
            return httpPipeline.send(request, context).flatMap(res -> {
                if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                    return (Mono<U>) BinaryData.fromFlux(res.getBody());
                } else {
                    return res.getBodyAsByteArray().flatMap(body -> Mono.fromCallable(() ->
                        SERIALIZER.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON)));
                }
            });
        }
    }

    /**
     * Cancellation is not supported by the 'Location' polling protocol.
     */
    @Override
    public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
        return Mono.error(new IllegalStateException("Cancellation is not supported."));
    }
}
/**
 * A polling strategy that polls the URL from the 'Location' response header until the
 * long-running operation completes.
 *
 * <p>FIX: the original declared {@code @Override} twice before {@code onInitialResponse}
 * (a compile error) where {@code canPoll} was evidently dropped; {@code canPoll} is restored
 * here consistent with the sibling variant of this class (URL-validating check). Also fixed the
 * doubled comma produced by the failure-message format string concatenation.
 * NOTE(review): confirm the restored {@code canPoll} matches the intended behavior.</p>
 *
 * @param <T> the type of the intermediate poll response
 * @param <U> the type of the final result
 */
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
    private final HttpPipeline httpPipeline;
    private final ObjectSerializer serializer;

    /**
     * Creates an instance of the location polling strategy using a JSON serializer.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline) {
        this(httpPipeline, new DefaultJsonSerializer());
    }

    /**
     * Creates an instance of the location polling strategy.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     * @param serializer a custom serializer for serializing and deserializing polling responses
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
        this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
        this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
    }

    /**
     * Polling requires a 'Location' header whose value parses as a well-formed URL.
     */
    @Override
    public Mono<Boolean> canPoll(Response<?> initialResponse) {
        HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            try {
                new URL(locationHeader.getValue());
                return Mono.just(true);
            } catch (MalformedURLException e) {
                return Mono.just(false);
            }
        }
        return Mono.just(false);
    }

    /**
     * Records the polling URL, HTTP method and request URL in the polling context, then converts
     * the activation response body (honoring 'Retry-After') into an IN_PROGRESS poll response.
     * Any status other than 200/201/202/204 surfaces as an {@link AzureException}.
     */
    @Override
    public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
            TypeReference<T> pollResponseType) {
        HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
        }
        pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
        pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());

        if (response.getStatusCode() == 200 || response.getStatusCode() == 201
                || response.getStatusCode() == 202 || response.getStatusCode() == 204) {
            String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
            Duration retryAfter = retryAfterValue == null
                ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
            return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
                .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
                .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
                    LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
        } else {
            return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
                + " 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
                PollingUtils.serializeResponse(response.getValue(), serializer))));
        }
    }

    /**
     * Issues a GET to the stored 'Location' URL and maps the status code to an LRO status:
     * 202 means still in progress, any other 2xx (200-204) means completed, anything else failed.
     * The raw response body is cached in the polling context for later result retrieval.
     */
    @Override
    public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
        return httpPipeline.send(request).flatMap(response -> {
            // The service may rotate the polling URL on every poll; honor the latest header.
            HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
            if (locationHeader != null) {
                pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
            }

            LongRunningOperationStatus status;
            if (response.getStatusCode() == 202) {
                status = LongRunningOperationStatus.IN_PROGRESS;
            } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            } else {
                status = LongRunningOperationStatus.FAILED;
            }

            return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
                pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
                String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
                Duration retryAfter = retryAfterValue == null
                    ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
                return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
                    .map(value -> new PollResponse<>(status, value, retryAfter));
            });
        });
    }

    /**
     * Fetches the final result: PUT/PATCH re-GET the original request URL, POST follows the
     * 'Location' URL; with no final URL, the cached poll response body is deserialized instead.
     */
    @Override
    public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.error(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            return Mono.error(new AzureException("Long running operation cancelled."));
        }

        String finalGetUrl;
        String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
                || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
                && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }

        if (finalGetUrl == null) {
            String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
            return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
        } else {
            HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
            return httpPipeline.send(request)
                .flatMap(HttpResponse::getBodyAsByteArray)
                .map(BinaryData::fromBytes)
                .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
        }
    }
}
It is OK. It's just that `URL` in Java actually does not validate much — hence our problem in the issue mentioned above (the URL check passes, but the value still contains a space). https://stackoverflow.com/a/5965755
/**
 * Determines whether this strategy can poll the given activation response.
 * Polling is possible only when the service returned a 'Location' header.
 */
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    return Mono.just(initialResponse.getHeaders().get(PollingConstants.LOCATION) != null);
}
}
/**
 * Determines whether this strategy can poll the given activation response.
 * Polling is possible only when the 'Location' header is present and its value
 * parses as a well-formed URL.
 */
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader == null) {
        return Mono.just(false);
    }
    try {
        new URL(locationHeader.getValue());
        return Mono.just(true);
    } catch (MalformedURLException e) {
        // Not a usable polling URL; let another strategy handle the response.
        return Mono.just(false);
    }
}
/**
 * A polling strategy that polls the URL from the 'Location' response header until the
 * long-running operation completes.
 *
 * <p>FIX: the original contained a duplicate {@code @Override} annotation on the method after
 * the constructor (a compile error) where {@code canPoll} was evidently dropped; {@code canPoll}
 * is restored here consistent with the sibling variants of this class.
 * NOTE(review): confirm the restored {@code canPoll} matches the intended behavior.</p>
 *
 * @param <T> the type of the intermediate poll response
 * @param <U> the type of the final result
 */
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
    private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter();

    private final HttpPipeline httpPipeline;
    private final Context context;

    /**
     * Creates an instance of the location polling strategy.
     *
     * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
     * @param context additional metadata to pass along with the request
     */
    public LocationPollingStrategy(HttpPipeline httpPipeline, Context context) {
        this.httpPipeline = httpPipeline;
        this.context = context;
    }

    /**
     * This strategy applies only when the activation response carries a 'Location' header.
     */
    @Override
    public Mono<Boolean> canPoll(Response<?> initialResponse) {
        HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
        return Mono.just(locationHeader != null);
    }

    /**
     * Records the polling URL, HTTP method and request URL in the polling context, then converts
     * the activation response value into an IN_PROGRESS poll response. Any status other than
     * 200/201/202/204 surfaces as an {@link AzureException}.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
            TypeReference<T> pollResponseType) {
        HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
        if (locationHeader != null) {
            pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
        }
        pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
        pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());

        if (response.getStatusCode() == 200 || response.getStatusCode() == 201
                || response.getStatusCode() == 202 || response.getStatusCode() == 204) {
            return Mono.just(LongRunningOperationStatus.IN_PROGRESS).flatMap(status -> {
                if (response.getValue() == null) {
                    return Mono.just(new PollResponse<>(status, null));
                } else if (TypeUtil.isTypeOrSubTypeOf(
                        response.getValue().getClass(), pollResponseType.getJavaType())) {
                    // The response value is already assignable to the requested type.
                    return Mono.just(new PollResponse<>(status, (T) response.getValue()));
                } else {
                    Mono<BinaryData> binaryDataMono;
                    if (response.getValue() instanceof BinaryData) {
                        binaryDataMono = Mono.just((BinaryData) response.getValue());
                    } else {
                        binaryDataMono = BinaryData.fromObjectAsync(response.getValue());
                    }
                    if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) {
                        return binaryDataMono.map(binaryData -> new PollResponse<>(status, (T) binaryData));
                    } else {
                        return binaryDataMono.flatMap(binaryData -> binaryData.toObjectAsync(pollResponseType))
                            .map(value -> new PollResponse<>(status, value));
                    }
                }
            });
        } else {
            return Mono.error(new AzureException("Operation failed or cancelled: " + response.getStatusCode()));
        }
    }

    /**
     * Issues a GET to the stored 'Location' URL and maps the status code to an LRO status:
     * 202 means still in progress, any other 2xx (200-204) means completed, anything else failed.
     * The raw response body is cached in the polling context for later result retrieval.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
        HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
        return httpPipeline.send(request, context).flatMap(res -> {
            // The service may rotate the polling URL on every poll; honor the latest header.
            HttpHeader locationHeader = res.getHeaders().get(PollingConstants.LOCATION);
            if (locationHeader != null) {
                pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
            }

            LongRunningOperationStatus status;
            if (res.getStatusCode() == 202) {
                status = LongRunningOperationStatus.IN_PROGRESS;
            } else if (res.getStatusCode() >= 200 && res.getStatusCode() <= 204) {
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            } else {
                status = LongRunningOperationStatus.FAILED;
            }

            return res.getBodyAsString().map(BinaryData::fromString).flatMap(binaryData -> {
                pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
                if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, pollResponseType.getJavaType())) {
                    return (Mono<T>) Mono.just(binaryData);
                } else {
                    return binaryData.toObjectAsync(pollResponseType);
                }
            }).map(pollResponse -> {
                String retryAfter = res.getHeaderValue(PollingConstants.RETRY_AFTER);
                if (retryAfter != null) {
                    return new PollResponse<>(status, pollResponse, Duration.ofSeconds(Long.parseLong(retryAfter)));
                } else {
                    return new PollResponse<>(status, pollResponse);
                }
            });
        });
    }

    /**
     * Fetches the final result: PUT/PATCH re-GET the original request URL, POST follows the
     * 'Location' URL; with no final URL, the cached poll response body is deserialized instead.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
        if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
            return Mono.error(new AzureException("Long running operation failed."));
        } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
            return Mono.error(new AzureException("Long running operation cancelled."));
        }

        String finalGetUrl;
        String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
        if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
                || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
            finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
        } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
                && pollingContext.getData(PollingConstants.LOCATION) != null) {
            finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
        } else {
            return Mono.error(new AzureException("Cannot get final result"));
        }

        if (finalGetUrl == null) {
            String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
            if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                return (Mono<U>) Mono.just(BinaryData.fromString(latestResponseBody));
            } else {
                return Mono.fromCallable(() -> SERIALIZER.deserialize(latestResponseBody,
                    resultType.getJavaType(), SerializerEncoding.JSON));
            }
        } else {
            HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
            return httpPipeline.send(request, context).flatMap(res -> {
                if (TypeUtil.isTypeOrSubTypeOf(BinaryData.class, resultType.getJavaType())) {
                    return (Mono<U>) BinaryData.fromFlux(res.getBody());
                } else {
                    return res.getBodyAsByteArray().flatMap(body -> Mono.fromCallable(() ->
                        SERIALIZER.deserialize(body, resultType.getJavaType(), SerializerEncoding.JSON)));
                }
            });
        }
    }

    /**
     * Cancellation is not supported by the 'Location' polling protocol.
     */
    @Override
    public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
        return Mono.error(new IllegalStateException("Cancellation is not supported."));
    }
}
class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> { private final HttpPipeline httpPipeline; private final ObjectSerializer serializer; /** * Creates an instance of the location polling strategy using a JSON serializer. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with */ public LocationPollingStrategy(HttpPipeline httpPipeline) { this(httpPipeline, new DefaultJsonSerializer()); } /** * Creates an instance of the location polling strategy. * * @param httpPipeline an instance of {@link HttpPipeline} to send requests with * @param serializer a custom serializer for serializing and deserializing polling responses */ public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) { this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null"); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); } @Override @Override public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name()); pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString()); if (response.getStatusCode() == 200 || response.getStatusCode() == 201 || response.getStatusCode() == 202 || response.getStatusCode() == 204) { String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType) .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter)) .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>( LongRunningOperationStatus.IN_PROGRESS, null, retryAfter)))); } else { return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d," + ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader, PollingUtils.serializeResponse(response.getValue(), serializer)))); } } @Override public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) { HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION)); return httpPipeline.send(request).flatMap(response -> { HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION); if (locationHeader != null) { pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue()); } LongRunningOperationStatus status; if (response.getStatusCode() == 202) { status = LongRunningOperationStatus.IN_PROGRESS; } else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) { status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; } else { status = LongRunningOperationStatus.FAILED; } return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> { pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER); Duration retryAfter = retryAfterValue == null ? 
null : Duration.ofSeconds(Long.parseLong(retryAfterValue)); return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType) .map(value -> new PollResponse<>(status, value, retryAfter)); }); }); } @Override public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) { if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) { return Mono.error(new AzureException("Long running operation failed.")); } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) { return Mono.error(new AzureException("Long running operation cancelled.")); } String finalGetUrl; String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD); if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod) || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) { finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL); } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod) && pollingContext.getData(PollingConstants.LOCATION) != null) { finalGetUrl = pollingContext.getData(PollingConstants.LOCATION); } else { return Mono.error(new AzureException("Cannot get final result")); } if (finalGetUrl == null) { String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY); return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType); } else { HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl); return httpPipeline.send(request) .flatMap(HttpResponse::getBodyAsByteArray) .map(BinaryData::fromBytes) .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType)); } } }
Shouldn't we check these two timestamps separately? If decoding started but never finished, recording the start time alone could surface gaps in the SDK, the decode process, or other internal errors — just as we do for other events, where a null end time is visible in the diagnostics.
// Completes the pending request record that correlates with this RNTBD response: 2xx/304
// responses complete with a StoreResponse; every other status code is mapped to the matching
// CosmosException subtype and completes the record exceptionally.
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {

    final Long transportRequestId = response.getTransportRequestId();

    if (transportRequestId == null) {
        // Without a transport request id the response cannot be correlated; report as an SDK issue.
        reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
        return;
    }

    final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);

    if (requestRecord == null) {
        // The request may already have expired, been cancelled, or completed; nothing to do.
        logger.debug("response {} ignored because its requestRecord is missing: {}",
            transportRequestId, response);
        return;
    }

    // Decode timings are recorded only when BOTH endpoints are present.
    // NOTE(review): a decode that started but never finished is silently dropped here —
    // consider recording the start time alone so incomplete decodes (null end time) show up
    // in diagnostics, as they do for other events.
    if (response.getDecodeEndTime() != null && response.getDecodeStartTime() != null) {
        requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime());
        requestRecord.stage(RntbdRequestRecord.Stage.DECODE_COMPLETED, response.getDecodeEndTime());
    }

    requestRecord.responseLength(response.getMessageLength());
    requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED);

    final HttpResponseStatus status = response.getStatus();
    final UUID activityId = response.getActivityId();
    final int statusCode = status.code();

    // Success: any 2xx or 304 (Not Modified) completes with a StoreResponse.
    if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code())
        || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) {

        final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
        requestRecord.complete(storeResponse);

    } else {

        // Failure: map the status (and, for 410, the sub-status) to a CosmosException subtype.
        final CosmosException cause;
        final long lsn = response.getHeader(RntbdResponseHeader.LSN);
        final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
        final CosmosError error = response.hasPayload()
            ? new CosmosError(RntbdObjectMapper.readTree(response))
            : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());

        final Map<String, String> responseHeaders = response.getHeaders().asMap(
            this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
        );

        final String resourceAddress = requestRecord.args().physicalAddress() != null
            ? requestRecord.args().physicalAddress().toString() : null;

        switch (status.code()) {

            case StatusCodes.BADREQUEST:
                cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.CONFLICT:
                cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.FORBIDDEN:
                cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.GONE:

                // 410 is further refined by the sub-status header so the retry policy can
                // distinguish split/migration/stale-cache conditions from a plain Gone.
                final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));

                switch (subStatusCode) {
                    case SubStatusCodes.COMPLETING_SPLIT:
                        cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId,
                            responseHeaders);
                        break;
                    case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                        cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId,
                            responseHeaders);
                        break;
                    case SubStatusCodes.NAME_CACHE_IS_STALE:
                        cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                        cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId,
                            responseHeaders);
                        break;
                    default:
                        // Flag that this Gone came from an actual 410 response, not a client-side timeout.
                        GoneException goneExceptionFromService =
                            new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        goneExceptionFromService.setIsBasedOn410ResponseFromService();
                        cause = goneExceptionFromService;
                        break;
                }
                break;

            case StatusCodes.INTERNAL_SERVER_ERROR:
                cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.LOCKED:
                cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.METHOD_NOT_ALLOWED:
                cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.NOTFOUND:
                cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.PRECONDITION_FAILED:
                cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.REQUEST_TIMEOUT:
                // A server-side 408 is surfaced as a (retriable) GoneException with the timeout as cause.
                Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders,
                    inner);
                break;

            case StatusCodes.RETRY_WITH:
                cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.SERVICE_UNAVAILABLE:
                cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.TOO_MANY_REQUESTS:
                cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.UNAUTHORIZED:
                cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            default:
                // Anything unrecognized falls through to a generic CosmosException.
                cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error,
                    responseHeaders);
                break;
        }

        BridgeInternal.setResourceAddress(cause, resourceAddress);
        requestRecord.completeExceptionally(cause);
    }
}
if (response.getDecodeEndTime() != null && response.getDecodeStartTime() != null) {
// Completes the pending request record that correlates with this RNTBD response: 2xx/304
// responses complete with a StoreResponse; every other status code is mapped to the matching
// CosmosException subtype and completes the record exceptionally.
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {

    final Long transportRequestId = response.getTransportRequestId();

    if (transportRequestId == null) {
        // Without a transport request id the response cannot be correlated; report as an SDK issue.
        reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
        return;
    }

    final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);

    if (requestRecord == null) {
        // The request may already have expired, been cancelled, or completed; nothing to do.
        logger.debug("response {} ignored because its requestRecord is missing: {}",
            transportRequestId, response);
        return;
    }

    // The decode start is recorded unconditionally so incomplete decodes remain visible in
    // diagnostics. NOTE(review): presumably stage() tolerates a null start time — confirm.
    requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime());
    // RECEIVED is stamped with the decode end time when available; otherwise with "now",
    // so the record always advances even if decoding never reported completion.
    requestRecord.stage(
        RntbdRequestRecord.Stage.RECEIVED,
        response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now());
    requestRecord.responseLength(response.getMessageLength());

    final HttpResponseStatus status = response.getStatus();
    final UUID activityId = response.getActivityId();
    final int statusCode = status.code();

    // Success: any 2xx or 304 (Not Modified) completes with a StoreResponse.
    if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code())
        || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) {

        final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
        requestRecord.complete(storeResponse);

    } else {

        // Failure: map the status (and, for 410, the sub-status) to a CosmosException subtype.
        final CosmosException cause;
        final long lsn = response.getHeader(RntbdResponseHeader.LSN);
        final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
        final CosmosError error = response.hasPayload()
            ? new CosmosError(RntbdObjectMapper.readTree(response))
            : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());

        final Map<String, String> responseHeaders = response.getHeaders().asMap(
            this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
        );

        final String resourceAddress = requestRecord.args().physicalAddress() != null
            ? requestRecord.args().physicalAddress().toString() : null;

        switch (status.code()) {

            case StatusCodes.BADREQUEST:
                cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.CONFLICT:
                cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.FORBIDDEN:
                cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.GONE:

                // 410 is further refined by the sub-status header so the retry policy can
                // distinguish split/migration/stale-cache conditions from a plain Gone.
                final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));

                switch (subStatusCode) {
                    case SubStatusCodes.COMPLETING_SPLIT:
                        cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId,
                            responseHeaders);
                        break;
                    case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                        cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId,
                            responseHeaders);
                        break;
                    case SubStatusCodes.NAME_CACHE_IS_STALE:
                        cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                        cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId,
                            responseHeaders);
                        break;
                    default:
                        // Flag that this Gone came from an actual 410 response, not a client-side timeout.
                        GoneException goneExceptionFromService =
                            new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        goneExceptionFromService.setIsBasedOn410ResponseFromService();
                        cause = goneExceptionFromService;
                        break;
                }
                break;

            case StatusCodes.INTERNAL_SERVER_ERROR:
                cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.LOCKED:
                cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.METHOD_NOT_ALLOWED:
                cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.NOTFOUND:
                cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.PRECONDITION_FAILED:
                cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.REQUEST_TIMEOUT:
                // A server-side 408 is surfaced as a (retriable) GoneException with the timeout as cause.
                Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders,
                    inner);
                break;

            case StatusCodes.RETRY_WITH:
                cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.SERVICE_UNAVAILABLE:
                cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.TOO_MANY_REQUESTS:
                cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            case StatusCodes.UNAUTHORIZED:
                cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;

            default:
                // Anything unrecognized falls through to a generic CosmosException.
                cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error,
                    responseHeaders);
                break;
        }

        BridgeInternal.setResourceAddress(cause, resourceAddress);
        requestRecord.completeExceptionally(cause);
    }
}
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase 
= "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. 
*/ private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
/**
 * Netty channel handler that manages the RNTBD request/response lifecycle for a single channel.
 * <p>
 * Responsibilities visible in this class: tracking pending {@link RntbdRequestRecord} instances,
 * completing the RNTBD context negotiation futures, buffering writes issued before negotiation
 * completes, and failing all pending work when the channel closes or errors.
 * <p>
 * NOTE(review): {@code channelRead} calls {@code this.messageReceived(...)}, but that method's
 * definition is not visible in this chunk — presumably defined elsewhere in the file; confirm.
 */
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {

    // Pre-allocated ClosedChannelExceptions with synthetic ("unknown") stack traces, so failing
    // pending requests on the corresponding lifecycle event does not pay for stack capture.
    private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");

    private static final ClosedChannelException ON_CLOSE =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");

    private static final ClosedChannelException ON_DEREGISTER =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");

    // Single daemon executor on which request expirations are run (see addPendingRequestRecord).
    private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
        "request-expirator",
        true,
        Thread.NORM_PRIORITY));

    private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);

    // Completed when the RNTBD context handshake response (or failure) is observed.
    private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
    // Completed when the RNTBD context request has been written (or has failed to write).
    private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
    private final ChannelHealthChecker healthChecker;
    private final int pendingRequestLimit;
    // Keyed by transportRequestId; entries are removed when the record completes (see
    // addPendingRequestRecord) or failed en masse in completeAllPendingRequestsExceptionally.
    private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
    private final Timestamps timestamps = new Timestamps();

    // True once completeAllPendingRequestsExceptionally has run; guards against double-failing.
    // NOTE(review): read/written on multiple handler callbacks without synchronization —
    // presumably all callbacks run on the channel's event loop; confirm.
    private boolean closingExceptionally = false;
    // Buffers encoded writes issued before the RNTBD context negotiation completes;
    // created in channelRegistered, drained in removeContextNegotiatorAndFlushPendingWrites.
    private CoalescingBufferQueue pendingWrites;

    /**
     * Creates a request manager for a single RNTBD channel.
     *
     * @param healthChecker checker consulted on idle-state events to decide whether the channel is healthy
     * @param pendingRequestLimit maximum number of concurrently pending requests; must be positive
     */
    public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) {
        checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
        checkNotNull(healthChecker, "healthChecker");
        this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
        this.pendingRequestLimit = pendingRequestLimit;
        this.healthChecker = healthChecker;
    }

    /**
     * Called after this handler was added to the pipeline and is ready to handle events.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerAdded(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerAdded");
    }

    /**
     * Called after this handler was removed from the pipeline and no longer handles events.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerRemoved(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerRemoved");
    }

    /**
     * The channel of the {@link ChannelHandlerContext} is now active; forwards the event.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelActive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelActive");
        context.fireChannelActive();
    }

    /**
     * The channel became inactive (only called after the channel is closed); forwards the event.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelInactive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelInactive");
        context.fireChannelInactive();
    }

    /**
     * Handles a message read from the peer. Only {@link RntbdResponse} messages are expected;
     * anything else is reported as an issue and routed through {@link #exceptionCaught}.
     * The message's reference count is always released in the {@code finally} block.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param message the message read
     */
    @Override
    public void channelRead(final ChannelHandlerContext context, final Object message) {
        this.traceOperation(context, "channelRead");
        try {
            if (message.getClass() == RntbdResponse.class) {
                try {
                    // NOTE(review): messageReceived is not defined in this chunk — defined elsewhere.
                    this.messageReceived(context, (RntbdResponse) message);
                } catch (CorruptedFrameException error) {
                    this.exceptionCaught(context, error);
                } catch (Throwable throwable) {
                    reportIssue(context, "{} ", message, throwable);
                    this.exceptionCaught(context, throwable);
                }
            } else {
                final IllegalStateException error = new IllegalStateException(
                    lenientFormat("expected message of %s, not %s: %s",
                        RntbdResponse.class,
                        message.getClass(),
                        message));
                reportIssue(context, "", error);
                this.exceptionCaught(context, error);
            }
        } finally {
            if (message instanceof ReferenceCounted) {
                boolean released = ((ReferenceCounted) message).release();
                reportIssueUnless(released, context, "failed to release message: {}", message);
            }
        }
    }

    /**
     * The channel has fully consumed the most-recent read; records the timestamp and forwards.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelReadComplete(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelReadComplete");
        this.timestamps.channelReadCompleted();
        context.fireChannelReadComplete();
    }

    /**
     * The channel was registered with its event loop. Constructs the {@link CoalescingBufferQueue}
     * used to buffer encoded requests until the RNTBD context is negotiated, then forwards the event.
     *
     * @param context the {@link ChannelHandlerContext} for which the registration is made
     */
    @Override
    public void channelRegistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelRegistered");
        reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
        this.pendingWrites = new CoalescingBufferQueue(context.channel());
        context.fireChannelRegistered();
    }

    /**
     * The channel was unregistered from its event loop. Fails any still-pending requests with
     * {@link #ON_CHANNEL_UNREGISTERED} unless they were already failed, then forwards the event.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelUnregistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelUnregistered");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
        } else {
            logger.debug("{} channelUnregistered exceptionally", context);
        }
        context.fireChannelUnregistered();
    }

    /**
     * The writable state of the channel changed; forwards the event.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelWritabilityChanged(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelWritabilityChanged");
        context.fireChannelWritabilityChanged();
    }

    /**
     * Handles a pipeline exception: fails all pending requests with {@code cause} and closes the
     * channel, unless an exceptional close is already in progress.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param cause exception caught
     */
    @Override
    @SuppressWarnings("deprecation")
    public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
        this.traceOperation(context, "exceptionCaught", cause);
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, cause);
            logger.debug("{} closing due to:", context, cause);
            context.flush().close();
        }
    }

    /**
     * Processes user events fired by other handlers in the pipeline:
     * <ul>
     * <li>{@link IdleStateEvent}: runs the health check and treats an unhealthy result as an error;</li>
     * <li>{@link RntbdContext}: completes the context future and flushes buffered writes;</li>
     * <li>{@link RntbdContextException}: fails the context future and closes the channel;</li>
     * <li>anything else is forwarded.</li>
     * </ul>
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param event an object representing a user event
     */
    @Override
    public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
        this.traceOperation(context, "userEventTriggered", event);
        try {
            if (event instanceof IdleStateEvent) {
                this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
                    final Throwable cause;
                    if (future.isSuccess()) {
                        if (future.get()) {
                            return; // healthy: nothing to do
                        }
                        cause = UnhealthyChannelException.INSTANCE;
                    } else {
                        cause = future.cause();
                    }
                    this.exceptionCaught(context, cause);
                });
                return;
            }
            if (event instanceof RntbdContext) {
                this.contextFuture.complete((RntbdContext) event);
                this.removeContextNegotiatorAndFlushPendingWrites(context);
                return;
            }
            if (event instanceof RntbdContextException) {
                this.contextFuture.completeExceptionally((RntbdContextException) event);
                context.pipeline().flush().close();
                return;
            }
            context.fireUserEventTriggered(event);
        } catch (Throwable error) {
            reportIssue(context, "{}: ", event, error);
            this.exceptionCaught(context, error);
        }
    }

    /**
     * Called once a bind operation is made; simply delegates.
     *
     * @param context the {@link ChannelHandlerContext} for which the bind operation is made
     * @param localAddress the {@link SocketAddress} to which it should bound
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
        this.traceOperation(context, "bind", localAddress);
        context.bind(localAddress, promise);
    }

    /**
     * Called once a close operation is made. Fails pending requests with {@link #ON_CLOSE} (unless
     * already failed), attempts a clean outbound TLS shutdown, then closes the channel.
     *
     * @param context the {@link ChannelHandlerContext} for which the close operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "close");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
        } else {
            logger.debug("{} closed exceptionally", context);
        }
        final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
        if (sslHandler != null) {
            // Send the TLS close_notify; SSLExceptions here are expected noise, anything else is rethrown.
            try {
                sslHandler.closeOutbound();
            } catch (Exception exception) {
                if (exception instanceof SSLException) {
                    logger.debug(
                        "SslException when attempting to close the outbound SSL connection: ",
                        exception);
                } else {
                    logger.warn(
                        "Exception when attempting to close the outbound SSL connection: ",
                        exception);
                    throw exception;
                }
            }
        }
        context.close(promise);
    }

    /**
     * Called once a connect operation is made; simply delegates.
     *
     * @param context the {@link ChannelHandlerContext} for which the connect operation is made
     * @param remoteAddress the {@link SocketAddress} to which it should connect
     * @param localAddress the {@link SocketAddress} which is used as source on connect
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void connect(
        final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
        final ChannelPromise promise
    ) {
        this.traceOperation(context, "connect", remoteAddress, localAddress);
        context.connect(remoteAddress, localAddress, promise);
    }

    /**
     * Called once a deregister operation is made from the current registered {@link EventLoop}.
     * Fails pending requests with {@link #ON_DEREGISTER} unless already failed, then delegates.
     *
     * @param context the {@link ChannelHandlerContext} for which the deregister operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "deregister");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
        } else {
            logger.debug("{} deregistered exceptionally", context);
        }
        context.deregister(promise);
    }

    /**
     * Called once a disconnect operation is made; simply delegates.
     *
     * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "disconnect");
        context.disconnect(promise);
    }

    /**
     * Called once a flush operation is made; flushes all previously written pending messages.
     *
     * @param context the {@link ChannelHandlerContext} for which the flush operation is made
     */
    @Override
    public void flush(final ChannelHandlerContext context) {
        this.traceOperation(context, "flush");
        context.flush();
    }

    /**
     * Intercepts read operations; simply delegates.
     *
     * @param context the {@link ChannelHandlerContext} for which the read operation is made
     */
    @Override
    public void read(final ChannelHandlerContext context) {
        this.traceOperation(context, "read");
        context.read();
    }

    /**
     * Called once a write operation is made. Accepts exactly two message types:
     * {@link RntbdRequestRecord} (registered as pending, then written) and the health-check
     * sentinel {@link RntbdHealthCheckRequest#MESSAGE}. Anything else is reported and treated
     * as an error.
     *
     * @param context the {@link ChannelHandlerContext} for which the write operation is made
     * @param message the message to write
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
        this.traceOperation(context, "write", message);
        if (message instanceof RntbdRequestRecord) {
            final RntbdRequestRecord record = (RntbdRequestRecord) message;
            this.timestamps.channelWriteAttempted();
            record.setSendingRequestHasStarted();
            context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
                record.stage(RntbdRequestRecord.Stage.SENT);
                if (completed.isSuccess()) {
                    this.timestamps.channelWriteCompleted();
                }
            });
            return;
        }
        if (message == RntbdHealthCheckRequest.MESSAGE) {
            context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
                if (completed.isSuccess()) {
                    this.timestamps.channelPingCompleted();
                }
            });
            return;
        }
        final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s",
            message.getClass(),
            message));
        reportIssue(context, "", error);
        this.exceptionCaught(context, error);
    }

    /** @return the number of requests currently pending on this channel */
    int pendingRequestCount() {
        return this.pendingRequests.size();
    }

    /**
     * @return the negotiated {@link RntbdContext}, if available.
     * NOTE(review): {@code getNow(null)} returns {@code null} before negotiation completes and
     * {@code Optional.of(null)} throws NPE — {@code Optional.ofNullable} looks intended; confirm
     * callers only invoke this after {@link #hasRntbdContext()}.
     */
    Optional<RntbdContext> rntbdContext() {
        return Optional.of(this.contextFuture.getNow(null));
    }

    /** @return future completed when the RNTBD context request has been written */
    CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
        return this.contextRequestFuture;
    }

    /** @return {@code true} once the RNTBD context request has been issued */
    boolean hasRequestedRntbdContext() {
        return this.contextRequestFuture.getNow(null) != null;
    }

    /** @return {@code true} once the RNTBD context negotiation has completed successfully */
    boolean hasRntbdContext() {
        return this.contextFuture.getNow(null) != null;
    }

    /**
     * Classifies the channel's capacity for more work.
     * Before the RNTBD context is available the effective limit is capped by {@code demand}.
     *
     * @param demand number of additional requests the caller would like to issue
     * @return {@code ok}, {@code pendingLimit}, or {@code contextNegotiationPending} with the current pending count
     */
    RntbdChannelState getChannelState(final int demand) {
        reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
        final int limit = this.hasRntbdContext()
            ? this.pendingRequestLimit
            : Math.min(this.pendingRequestLimit, demand);
        if (this.pendingRequests.size() < limit) {
            return RntbdChannelState.ok(this.pendingRequests.size());
        }
        if (this.hasRntbdContext()) {
            return RntbdChannelState.pendingLimit(this.pendingRequests.size());
        } else {
            return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size()));
        }
    }

    /**
     * Buffers an encoded request until the RNTBD context negotiation completes.
     *
     * @param out the encoded request bytes
     * @param promise the write promise to complete when the buffered bytes are eventually written
     */
    void pendWrite(final ByteBuf out, final ChannelPromise promise) {
        this.pendingWrites.add(out, promise);
    }

    /** @return a point-in-time copy of this channel's read/write/ping timestamps */
    Timestamps snapshotTimestamps() {
        return new Timestamps(this.timestamps);
    }

    /**
     * Registers {@code record} in {@link #pendingRequests} keyed by its transport request id,
     * arms its expiration timeout (run on {@link #requestExpirationExecutor}), and wires
     * completion to remove the entry and cancel the timeout.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param record the request record to track
     * @return the same {@code record}, for fluent use at the write call site
     */
    private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
        return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
            // A non-null current entry would mean a duplicate transport request id.
            reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record);
            record.pendingRequestQueueSize(pendingRequests.size());
            final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
                // Expiration is run off the timer thread to keep the wheel responsive.
                requestExpirationExecutor.execute(record::expire);
            });
            record.whenComplete((response, error) -> {
                this.pendingRequests.remove(id);
                pendingRequestTimeout.cancel();
            });
            return record;
        });
    }

    /**
     * Fails everything pending on this channel: buffered writes, the context negotiation futures
     * (if still open), and every pending request record, each completed with a {@link GoneException}
     * that carries the original request headers and address. Sets {@link #closingExceptionally} so
     * subsequent lifecycle callbacks do not repeat the work.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param throwable the cause of the teardown
     */
    private void completeAllPendingRequestsExceptionally(
        final ChannelHandlerContext context, final Throwable throwable
    ) {
        reportIssueUnless(!this.closingExceptionally, context, "", throwable);
        this.closingExceptionally = true;

        if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
            this.pendingWrites.releaseAndFailAll(context, throwable);
        }
        if (this.pendingRequests.isEmpty()) {
            return;
        }
        if (!this.contextRequestFuture.isDone()) {
            this.contextRequestFuture.completeExceptionally(throwable);
        }
        if (!this.contextFuture.isDone()) {
            this.contextFuture.completeExceptionally(throwable);
        }

        final int count = this.pendingRequests.size();
        Exception contextRequestException = null;
        String phrase = null;

        // Derive a human-readable phrase and, where possible, a more specific cause from
        // whichever negotiation future failed.
        if (this.contextRequestFuture.isCompletedExceptionally()) {
            try {
                this.contextRequestFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request write cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = new ChannelException(error);
            }
        } else if (this.contextFuture.isCompletedExceptionally()) {
            try {
                this.contextFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request read cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = new ChannelException(error);
            }
        } else {
            phrase = "closed exceptionally";
        }

        final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
        final Exception cause;

        if (throwable instanceof ClosedChannelException) {
            // Prefer the more informative negotiation failure over a bare ClosedChannelException.
            cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException;
        } else {
            cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable);
        }

        for (RntbdRequestRecord record : this.pendingRequests.values()) {
            final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
            final String requestUri = record.args().physicalAddress().toString();
            final GoneException error = new GoneException(message, cause, null, requestUri);
            BridgeInternal.setRequestHeaders(error, requestHeaders);
            record.completeExceptionally(error);
        }
    }

    /**
     * Removes the {@link RntbdContextNegotiator} handlers from the pipeline (negotiation is done)
     * and writes out any requests that were buffered while negotiation was in progress.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
        final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
        negotiator.removeInboundHandler();
        negotiator.removeOutboundHandler();
        if (!this.pendingWrites.isEmpty()) {
            this.pendingWrites.writeAndRemoveAll(context);
            context.flush();
        }
    }

    // Static conveniences over RntbdReporter, bound to this class's logger.
    private static void reportIssue(final Object subject, final String format, final Object... args) {
        RntbdReporter.reportIssue(logger, subject, format, args);
    }

    private static void reportIssueUnless(
        final boolean predicate, final Object subject, final String format, final Object... args
    ) {
        RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
    }

    // TRACE-level breadcrumb for every handler callback; the three placeholders take the
    // operation name, the context, and the varargs array.
    private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
        logger.trace("{}\n{}\n{}", operationName, context, args);
    }

    /**
     * Singleton exception used to signal a failed health check; fillInStackTrace is a no-op
     * so the shared instance carries no stack trace.
     */
    private static final class UnhealthyChannelException extends ChannelException {

        static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException();

        private UnhealthyChannelException() {
            super("health check failed");
        }

        @Override
        public Throwable fillInStackTrace() {
            return this;
        }
    }
}
What is the purpose of this change? (It appears to replace the hard-coded ~1000-character "mypk" partition-key value with the generated document UUID — please confirm the motivation.)
/**
 * Builds a test document with a fresh random UUID used for both the {@code id} and the
 * {@code "mypk"} partition-key property.
 * <p>
 * Fix: the previous version set {@code "mypk"} to a hard-coded ~1000-character constant, so every
 * document landed on the same partition-key value and queries of the form
 * {@code SELECT * from c where c.mypk in ('<itemId>', ...)} (built elsewhere in this test class
 * from collected item ids) could never match. Setting {@code mypk = id} makes those lookups work
 * and spreads documents across logical partitions.
 *
 * @return a new {@link InternalObjectNode} with {@code id == mypk == random UUID}
 */
private InternalObjectNode getInternalObjectNode() {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", uuid);
    return internalObjectNode;
}
BridgeInternal.setProperty(internalObjectNode, "mypk", "wiqkclagxrdqclmvuzcsomihrkxbbikwypqrcgrhpgkylztdxxirzwwkmleovdrikggqupfcclpaowyetbywvbeyegniacruvzncxflfzrwnhtdubzrezefqhtyznagotfxtcynnyderhryvbtpoxbuyfkkwsmoydfmglwzgqobraysfidipfsybbgsromwwiuygkuzbitkwzvzuenfjvxkklcjomrasmddllqfiqmgmcbmnmikpvyzbdbuitfjuohwbqfyueqjvbwehgwhosooehtglncoyiundhhqrsbkazhuxjzqoteouwexvzchkhuukpkqkdlkfhhqbgcvrhzizljrkfujebedsfdrvzdvavxrwpkyqvnvbbqvoylvxejetqvzqaakrsvbnilfngjgdavkdwfgxqvhjatrccvsbjpueenjljgouzoqdweckcjjhfiloefucsmloniyklseuztfhkzufbzntnvlaziaqqoayilyxssmfafvawxxnhyyaosnlsujsemzwvpirnbkjtfbggaamzcocdvsebdkxkyfvkakvangpgmrqqctlrbblrmmwchqliypeuuqtbiozgtfjtwjdhhowetaltnlpwqokckxpbfrpyshcygzrxdzhnffjagppnaeyrzuhrofoyurrythnpefgtiybfmckzwvsgntsudnqfqcudeporevbhgvlkaavdypfiahusbzwcgsigspfpxlqbxhqfzbrrcrgtlcbokvrtrghobknoxcaelnhchwaekpqepimanaqagajdpzbsaoepnkkfrxsavjkpxeawhrhacnnutphuzngxnpzrtujcfwvmhxuskdmmoyjdtrmqvbgehwpfseiyjsmugjpcbndigkufynwnuolasltqxfirxvkqcibculpqbarejhtufquoqzvvpwfszzpsjovmajrfviuozdscztuhavlszbdncrxdsmoikkmxwenfkqoomdzxbvw");
/**
 * Builds a test document whose {@code id} and {@code "mypk"} partition-key property are the same
 * freshly generated random UUID, so id-based lookups on {@code mypk} resolve to the document.
 *
 * @return a new {@link InternalObjectNode} with {@code id == mypk == random UUID}
 */
private InternalObjectNode getInternalObjectNode() {
    // Generate the shared identifier first, then populate the document.
    final String documentId = UUID.randomUUID().toString();
    final InternalObjectNode document = new InternalObjectNode();
    document.setId(documentId);
    BridgeInternal.setProperty(document, "mypk", documentId);
    return document;
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, 
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); 
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, 
InternalObjectNode.class).iterableByPage().iterator();
        // Only the first page should carry the query-plan timings.
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            if (feedResponseCounter == 0) {
                assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
            } else {
                assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
            }
            feedResponseCounter++;
        }
    }
}

// Data-driven check of query-metrics presence in DIRECT mode. The provided queries
// return no documents; for "group by" queries only the first page is validated.
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
public void queryMetrics(String query, Boolean qmEnabled) {
    CosmosContainer directContainer =
        this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
            .getContainer(this.cosmosAsyncContainer.getId());
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options.setQueryMetricsEnabled(qmEnabled);
    }
    boolean qroupByFirstResponse = true; // NOTE(review): typo — presumably "groupByFirstResponse".
    Iterator<FeedResponse<InternalObjectNode>> iterator =
        directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        assertThat(feedResponse.getResults().size()).isEqualTo(0);
        if (!query.contains("group by") || qroupByFirstResponse) {
            validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
            validateDirectModeQueryDiagnostics(queryDiagnostics);
            if (query.contains("group by")) {
                qroupByFirstResponse = false;
            }
        }
    }
}

// Shared assertions for DIRECT-mode query diagnostics.
private void validateDirectModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
    assertThat(diagnostics).contains("supplementalResponseStatisticsList");
    assertThat(diagnostics).contains("responseStatisticsList");
    assertThat(diagnostics).contains("\"gatewayStatistics\":null");
    assertThat(diagnostics).contains("addressResolutionStatistics");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
}

// Shared assertions for GATEWAY-mode query diagnostics.
private void validateGatewayModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
    assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
    assertThat(diagnostics).contains("\"operationType\":\"Query\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
    assertThat(diagnostics).contains("\"regionsContacted\"");
}

// Same data-driven metrics check as queryMetrics, but through a GATEWAY-mode client.
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
    // NOTE(review): the variable is named testDirectClient but is built in gateway
    // mode, and it is never closed — potential resource leak.
    CosmosClient testDirectClient = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .gatewayMode()
        .buildClient();
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    CosmosContainer cosmosContainer =
        testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId())
            .getContainer(cosmosAsyncContainer.getId());
    List<String> itemIdList = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        // NOTE(review): createResponse is unused; the call is made for its side effect.
        CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
        if (i % 20 == 0) {
            itemIdList.add(internalObjectNode.getId());
        }
    }
    boolean qroupByFirstResponse = true;
    if (qmEnabled != null) {
        options.setQueryMetricsEnabled(qmEnabled);
    }
    Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer
        .queryItems(query, options, InternalObjectNode.class)
        .iterableByPage()
        .iterator();
    assertThat(iterator.hasNext()).isTrue();
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        assertThat(feedResponse.getResults().size()).isEqualTo(0);
        if (!query.contains("group by") || qroupByFirstResponse) {
            validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
            validateGatewayModeQueryDiagnostics(queryDiagnostics);
            if (query.contains("group by")) {
                qroupByFirstResponse = false;
            }
        }
    }
}

// Ensures request-charge accumulation works under a non-ROOT default locale.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithADifferentLocale() {
    // NOTE(review): mutates the JVM-wide default locale without try/finally — an
    // assertion failure would leave Locale.GERMAN set for subsequent tests.
    Locale.setDefault(Locale.GERMAN);
    String query = "select * from root where root.id= \"someid\"";
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    Iterator<FeedResponse<InternalObjectNode>> iterator =
        this.container.queryItems(query, options, InternalObjectNode.class)
            .iterableByPage().iterator();
    double requestCharge = 0;
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        requestCharge += feedResponse.getRequestCharge();
    }
    assertThat(requestCharge).isGreaterThan(0);
    Locale.setDefault(Locale.ROOT);
}

// Common assertions for query-metrics / query-plan fragments in a diagnostics string.
// qmEnabled == null means metrics default to enabled.
private static void validateQueryDiagnostics(
    String queryDiagnostics,
    Boolean qmEnabled,
    boolean expectQueryPlanDiagnostics) {
    if (qmEnabled == null || qmEnabled) {
        assertThat(queryDiagnostics).contains("Retrieved Document Count");
        assertThat(queryDiagnostics).contains("Query Preparation Times");
        assertThat(queryDiagnostics).contains("Runtime Execution Times");
        assertThat(queryDiagnostics).contains("Partition Execution Timeline");
    } else {
        assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count");
        assertThat(queryDiagnostics).doesNotContain("Query Preparation Times");
        assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times");
        assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline");
    }
    if (expectQueryPlanDiagnostics) {
        assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
        assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
    } else {
        assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
        assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
        assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
    }
}

// Reads all items of one logical partition in pages of 5 and validates per-page
// diagnostics (no query plan is expected for a partition read-all).
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
    String pkValue = UUID.randomUUID().toString();
    for (int i = 0; i < expectedItemCount; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue);
        // NOTE(review): createResponse is unused; the call is made for its side effect.
        CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options = options.setQueryMetricsEnabled(qmEnabled);
    }
    // Page size 5 forces multiple pages for counts above 5.
    ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
    Iterator<FeedResponse<InternalObjectNode>> iterator = this.container
        .readAllItems(
            new PartitionKey(pkValue),
            options,
            InternalObjectNode.class)
        .iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    int actualItemCount = 0;
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        actualItemCount += feedResponse.getResults().size();
        validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
    }
    assertThat(actualItemCount).isEqualTo(expectedItemCount);
}

// Reads an item with a deliberately wrong partition key and asserts the diagnostics
// attached to the resulting 404.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnException() {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        createResponse = container.createItem(internalObjectNode);
        // NOTE(review): cosmosItemRequestOptions is built but never passed to readItem.
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        // The read goes through the class-level direct client's container.
        CosmosItemResponse<InternalObjectNode> readResponse =
            cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey("wrongPartitionKey"),
                InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        assertThat(diagnostics).contains("\"backendLatencyInMs\"");
        isValidJSON(diagnostics);
        validateTransportRequestTimelineDirect(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}

// Forces a metadata (collection lookup) failure by swapping in a mocked HTTP client,
// then checks the diagnostics attached to the resulting 400.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnMetadataException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
        // Every metadata HTTP call fails with a 400.
        Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class)))
            .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest")));
        // NOTE(review): double assignment "x = x = ..." — the inner assignment is redundant.
        RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel =
            ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper());
        ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient);
        container.createItem(internalObjectNode);
        fail("request should fail as bad request");
    } catch (CosmosException exception) {
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\"");
        assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        isValidJSON(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}

// Checks that the serialized diagnostics cap supplementalResponseStatisticsList at 10
// entries while the in-memory list keeps all recorded responses.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void supplementalResponseStatisticsList() throws Exception {
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext());
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest =
            RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics =
        getStoreResponseStatistics(clientSideRequestStatistics);
    ObjectMapper objectMapper = new ObjectMapper();
    String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = objectMapper.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // 15 responses recorded, but only 10 are serialized.
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest =
            RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    objectMapper = new ObjectMapper();
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // Below the cap, serialized count matches the recorded count.
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
    for (JsonNode node : supplementalResponseStatisticsListNode) {
        assertThat(node.get("storeResult").asText()).isNotNull();
        String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
        Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
        // The response time must be recent (within the last 5 seconds).
        assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
        assertThat(node.get("requestResponseTimeUTC")).isNotNull();
        assertThat(node.get("requestOperationType")).isNotNull();
        // NOTE(review): duplicated assertion; a different field was presumably intended.
        assertThat(node.get("requestOperationType")).isNotNull();
    }
}

@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void
serializationOnVariousScenarios() {
    // Verifies which serialization/deserialization steps are reported in diagnostics
    // for database/container reads and for item create/read flows.
    CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
    String diagnostics = cosmosDatabase.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
    CosmosContainerResponse containerResponse = this.container.read();
    diagnostics = containerResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
    TestItem testItem = new TestItem();
    testItem.id = "TestId";
    testItem.mypk = "TestPk";
    // Create WITHOUT an explicit PK: a partition-key fetch serialization is reported.
    CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    testItem.id = "TestId2";
    testItem.mypk = "TestPk";
    // Create WITH an explicit PK: no PK fetch, and no item deserialization yet.
    itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    // Accessing the item triggers deserialization, which then shows up in diagnostics.
    TestItem readTestItem = itemResponse.getItem();
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    CosmosItemResponse<InternalObjectNode> readItemResponse =
        this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
    InternalObjectNode properties = readItemResponse.getItem();
    diagnostics = readItemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
}

// Validates RNTBD request/response payload-size statistics across create (success and
// 409 conflict), read, and delete operations.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
    // Duplicate create: the conflict carries a request payload but no response payload.
    try {
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        validate(e.getDiagnostics(), testItemLength, 0);
    }
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

// Runs three upserts separated by 1s sleeps so the service-endpoint statistics
// (createdTime / lastRequestTime / lastSuccessfulRequestTime) can be bounded by
// wall-clock instants captured around each operation.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();
        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        // NOTE(review): testItemLength is unused in this test.
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        Thread.sleep(1000);
        // Operation 1 is the first request on this client, bracketed to bound the
        // RNTBD service-endpoint creation time.
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();
        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3);
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

// Asserts the serviceEndpointStatistics embedded in the first RNTBD store result:
// channel/in-flight counts plus time bounds derived from the captured instants.
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    // NOTE(review): hasPayload is unused in this method.
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    // acquiredChannels is expected 0 while inflightRequests is expected 1 —
    // presumably the measured request itself; TODO confirm against SDK semantics.
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
    // Endpoint creation must fall within the window around operation 1 (1ms slack).
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    // last(Successful)RequestTime is asserted to fall within operation 2's window,
    // i.e. the previous completed request before the measured one.
    Instant afterOperation2Threshold = afterOperation2.plusMillis(1);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
}

// Asserts payload-length bookkeeping in the first store result of the diagnostics.
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    // A response payload is only expected when no exception was recorded.
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult).isNotNull();
assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    // NOTE(review): the next line duplicates the assertion above.
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    // The wire-level RNTBD frame is always larger than the bare payload.
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}

// Emulator-only: verifies addressResolutionStatistics on a healthy client, then swaps
// a proxy-backed HTTP client into the address cache (via reflection) to force an
// address-resolution failure whose error message must appear in the diagnostics.
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
public void addressResolutionStatistics() {
    CosmosClient client1 = null;
    CosmosClient client2 = null;
    String databaseId = DatabaseForTest.generateId();
    String containerId = UUID.randomUUID().toString();
    CosmosDatabase cosmosDatabase = null;
    CosmosContainer cosmosContainer = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        client1.createDatabase(databaseId);
        cosmosDatabase = client1.getDatabase(databaseId);
        cosmosDatabase.createContainer(containerId, "/mypk");
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        cosmosContainer = cosmosDatabase.getContainer(containerId);
        CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
        // Healthy path: resolution completed and no error was recorded.
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" +
            ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information");
        client2 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        cosmosDatabase = client2.getDatabase(databaseId);
        cosmosContainer = cosmosDatabase.getContainer(containerId);
        AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
        // Reach into the private address cache and replace its HTTP client with the
        // proxy-backed one so address resolution fails with connection refused.
        GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient,
            "addressResolver", true);
        @SuppressWarnings("rawtypes")
        Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver,
            "addressCacheByEndpoint",
            true);
        Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
        GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
        HttpClient httpClient = httpClient(true);
        FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
        // Restore a working HTTP client after 5s so the read can eventually succeed.
        new Thread(() -> {
            try {
                Thread.sleep(5000);
                HttpClient httpClient1 = httpClient(false);
                FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
            } catch (Exception e) {
                fail(e.getMessage());
            }
        }).start();
        PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
        CosmosItemResponse<InternalObjectNode> readResourceResponse =
            cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
                InternalObjectNode.class);
        // Failure path: the connection-refused error must be captured in diagnostics
        // alongside the eventually-successful resolution.
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" +
            ".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
    } catch (Exception ex) {
        logger.error("Error in test addressResolutionStatistics", ex);
        fail("This test should not throw exception " + ex);
    } finally {
        safeDeleteSyncDatabase(cosmosDatabase);
        if (client1 != null) {
            client1.close();
        }
        if (client2 != null) {
            client2.close();
        }
    }
}

// Builds an item with a random id and the given partition-key value.
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue);
    return internalObjectNode;
}

// Reads the private supplementalResponseStatisticsList field via reflection.
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> list =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
    return list;
}

// Resets the private supplementalResponseStatisticsList field to an empty list.
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}

// Expected transport-timeline events for gateway mode.
private void validateTransportRequestTimelineGateway(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    // NOTE(review): duplicated assertion; a different event name was presumably intended.
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}

// Expected transport-timeline events for direct (RNTBD) mode.
private void validateTransportRequestTimelineDirect(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"created\"");
    assertThat(diagnostics).contains("\"eventName\":\"queued\"");
    assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\"");
    assertThat(diagnostics).contains("\"eventName\":\"pipelined\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
    assertThat(diagnostics).contains("\"eventName\":\"completed\"");
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMicroSec\"");
}

// Fails the test if the given string is not parseable JSON.
public void isValidJSON(final String json) {
    try {
        final JsonParser parser = new ObjectMapper().createParser(json);
        while (parser.nextToken() != null) {
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}

// Builds an HTTP client; with fakeProxy=true it is routed through localhost:8888 —
// presumably unreachable in the test environment, so requests fail (TODO confirm).
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig httpClientConfig;
    if (fakeProxy) {
        httpClientConfig = new HttpClientConfig(new Configs())
            .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    } else {
        httpClientConfig = new HttpClientConfig(new Configs());
    }
    return HttpClient.createFixed(httpClientConfig);
}

// Minimal POJO used as the item payload in these tests.
public static class TestItem {
    public String id;
    public String mypk;

    public TestItem() {
    }
}
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
// Data provider: {expected item count within one logical partition, query-metrics flag}.
// The flag is exercised as TRUE, NULL (SDK default) and FALSE to cover every
// CosmosQueryRequestOptions.setQueryMetricsEnabled permutation at several page sizes.
@DataProvider(name = "readAllItemsOfLogicalPartition")
private Object[][] readAllItemsOfLogicalPartition() {
    return new Object[][]{
        new Object[] { 1, true },
        new Object[] { 5, null },
        new Object[] { 20, null },
        new Object[] { 1, false },
        new Object[] { 5, false },
        new Object[] { 20, false },
    };
}
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = 
// Asserts the markers that must be present in any direct (TCP) mode query
// diagnostics string: store response statistics, address resolution data, a null
// gatewayStatistics section, the SDK user agent and a non-empty activity id.
private void validateDirectModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
    assertThat(diagnostics).contains("supplementalResponseStatisticsList");
    assertThat(diagnostics).contains("responseStatisticsList");
    // Direct mode never records gateway statistics.
    assertThat(diagnostics).contains("\"gatewayStatistics\":null");
    assertThat(diagnostics).contains("addressResolutionStatistics");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); 
double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new 
CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch 
(CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { 
isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest 
rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = 
// NOTE(review): the statements below are the tail of a serialization-diagnostics test whose
// beginning lies above this chunk; they verify which serialization events are surfaced in
// the diagnostics string for create/read operations.
itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
testItem.id = "TestId2";
testItem.mypk = "TestPk";
itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
diagnostics = itemResponse.getDiagnostics().toString();
// With an explicit PartitionKey supplied, no PK-fetch serialization happens, and the item
// payload has not been deserialized yet at this point.
assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
TestItem readTestItem = itemResponse.getItem();
diagnostics = itemResponse.getDiagnostics().toString();
// getItem() materializes the payload, so the deserialization event must now be recorded.
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
InternalObjectNode properties = readItemResponse.getItem();
diagnostics = readItemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}

/**
 * Verifies that direct-mode (RNTBD) diagnostics report request/response payload lengths
 * correctly for create (201), conflicting create (409 — no response payload), read and delete.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    // Serialized size of the item; request payload length must match this on writes.
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
    try {
        // A second create of the same id must conflict; its diagnostics carry no response payload.
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        validate(e.getDiagnostics(), testItemLength, 0);
    }
    // Reads send no payload; deletes send and receive none.
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

/**
 * Exercises RNTBD service-endpoint statistics: captures timestamps around three upserts
 * (separated by 1s sleeps) and asserts the endpoint's created/lastRequest/lastSuccessfulRequest
 * times fall inside the expected windows.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();
        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        Thread.sleep(1000);
        // First operation lazily initializes the RNTBD service endpoint.
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();
        // Operation 3's diagnostics should reflect the endpoint state established by operations 1-2.
        validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3);
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

/**
 * Parses the diagnostics JSON and asserts the RNTBD store-result / service-endpoint statistics:
 * channel and request counters, plus createdTime/lastRequestTime/lastSuccessfulRequestTime
 * bounded by the instants captured by the caller (with small clock-skew margins).
 */
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    // no concurrent work in this test — nothing should be holding a channel
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    // the request whose diagnostics we are reading is itself in flight
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
    // 1-2ms margins tolerate timestamp truncation/rounding between capture and diagnostics.
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold)
        .isBeforeOrEqualTo(afterOperation2Threshold);
}

/**
 * Asserts request/response payload lengths in the diagnostics JSON. RNTBD wire lengths must
 * strictly exceed the bare payload sizes (headers add overhead); exact payload lengths must
 * match, except the response payload is only checked when the store result has no exception.
 */
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult).isNotNull();
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    // NOTE(review): verbatim repeat of the assertion above — probably meant to be a different field.
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}

/**
 * Emulator-only test: injects (via reflection) an HttpClient pointing at an unreachable proxy
 * into the GatewayAddressCache so the first address-resolution attempt fails, then restores a
 * working client from a background thread after 5s, and asserts the diagnostics record both the
 * connection-refused error and the eventual success.
 */
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
public void addressResolutionStatistics() {
    CosmosClient client1 = null;
    CosmosClient client2 = null;
    String databaseId = DatabaseForTest.generateId();
    String containerId = UUID.randomUUID().toString();
    CosmosDatabase cosmosDatabase = null;
    CosmosContainer cosmosContainer = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        client1.createDatabase(databaseId);
        cosmosDatabase = client1.getDatabase(databaseId);
        cosmosDatabase.createContainer(containerId, "/mypk");
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        cosmosContainer = cosmosDatabase.getContainer(containerId);
        CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
        // Healthy client: address resolution recorded, completed, and error-free.
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information");
        client2 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        cosmosDatabase = client2.getDatabase(databaseId);
        cosmosContainer = cosmosDatabase.getContainer(containerId);
        AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
        GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true);
        @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true);
        Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
        GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
        // Swap in an HttpClient routed through a non-existent proxy -> address lookups fail.
        HttpClient httpClient = httpClient(true);
        FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
        // After 5s, restore a working HttpClient so the retried lookup succeeds.
        new Thread(() -> {
            try {
                Thread.sleep(5000);
                HttpClient httpClient1 = httpClient(false);
                FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
            } catch (Exception e) {
                fail(e.getMessage());
            }
        }).start();
        PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
        CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class);
        // The read ultimately succeeds, but the diagnostics must retain the earlier failure.
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
    } catch (Exception ex) {
        logger.error("Error in test addressResolutionStatistics", ex);
        fail("This test should not throw exception " + ex);
    } finally {
        safeDeleteSyncDatabase(cosmosDatabase);
        if (client1 != null) {
            client1.close();
        }
        if (client2 != null) {
            client2.close();
        }
    }
}

// Builds a test document with a random id and the caller-supplied "mypk" partition-key value.
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue);
    return internalObjectNode;
}

// Reads the private supplementalResponseStatisticsList field via reflection.
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
    return list;
}

// Resets the private supplementalResponseStatisticsList field to an empty list via reflection.
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}

// Asserts the gateway-mode transport timeline events appear in the diagnostics string.
private void validateTransportRequestTimelineGateway(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    // NOTE(review): verbatim repeat of the assertion above — likely unintentional duplication.
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}

// Asserts the direct-mode (RNTBD) transport timeline events appear in the diagnostics string.
private void validateTransportRequestTimelineDirect(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"created\"");
    assertThat(diagnostics).contains("\"eventName\":\"queued\"");
    assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\"");
    assertThat(diagnostics).contains("\"eventName\":\"pipelined\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    // deliberately a prefix (no closing escaped quote) — matches decodeTime* event names
    assertThat(diagnostics).contains("\"eventName\":\"decodeTime");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
    assertThat(diagnostics).contains("\"eventName\":\"completed\"");
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMicroSec\"");
}

// Fails the test if the given string is not parseable JSON.
public void isValidJSON(final String json) {
    try {
        final JsonParser parser = new ObjectMapper().createParser(json);
        while (parser.nextToken() != null) {
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}

// Builds an HttpClient; when fakeProxy is true it is routed through localhost:8888, an
// address with no listener, so every request fails with connection refused.
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig httpClientConfig;
    if(fakeProxy) {
        httpClientConfig = new HttpClientConfig(new Configs())
            .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    } else {
        httpClientConfig = new HttpClientConfig(new Configs());
    }
    return HttpClient.createFixed(httpClientConfig);
}

// Deserializes an IndexUtilizationInfo from JSON; returns null (and logs) on malformed input.
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
    IndexUtilizationInfo indexUtilizationInfo = null;
    try {
        indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
    }
    return indexUtilizationInfo;
}

// Digs the first available write region out of the client's LocationCache via reflection and
// asserts the diagnostics report exactly that one contacted region (lower-cased).
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
    RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
    GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
    Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
    locationInfoField.setAccessible(true);
    Object locationInfo = locationInfoField.get(locationCache);
    Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo");
    Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation");
    availableWriteEndpointByLocation.setAccessible(true);
    @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
    String regionName = map.keySet().iterator().next();
    assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
    assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}

// Minimal POJO used as the document payload in the tests above; "mypk" is the partition key.
public static class TestItem {
    public String id;
    public String mypk;
    public TestItem() {
    }
}
}
// TODO(review): add a test case covering the decodeCompleted transport timeline event.
// Builds a test document with a random id and a fixed ~1 KB "mypk" partition-key value.
// NOTE(review): presumably the oversized literal exercises large-partition-key handling
// (e.g. crossing a size threshold) — confirm before replacing it with generated data.
private InternalObjectNode getInternalObjectNode() {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", "wiqkclagxrdqclmvuzcsomihrkxbbikwypqrcgrhpgkylztdxxirzwwkmleovdrikggqupfcclpaowyetbywvbeyegniacruvzncxflfzrwnhtdubzrezefqhtyznagotfxtcynnyderhryvbtpoxbuyfkkwsmoydfmglwzgqobraysfidipfsybbgsromwwiuygkuzbitkwzvzuenfjvxkklcjomrasmddllqfiqmgmcbmnmikpvyzbdbuitfjuohwbqfyueqjvbwehgwhosooehtglncoyiundhhqrsbkazhuxjzqoteouwexvzchkhuukpkqkdlkfhhqbgcvrhzizljrkfujebedsfdrvzdvavxrwpkyqvnvbbqvoylvxejetqvzqaakrsvbnilfngjgdavkdwfgxqvhjatrccvsbjpueenjljgouzoqdweckcjjhfiloefucsmloniyklseuztfhkzufbzntnvlaziaqqoayilyxssmfafvawxxnhyyaosnlsujsemzwvpirnbkjtfbggaamzcocdvsebdkxkyfvkakvangpgmrqqctlrbblrmmwchqliypeuuqtbiozgtfjtwjdhhowetaltnlpwqokckxpbfrpyshcygzrxdzhnffjagppnaeyrzuhrofoyurrythnpefgtiybfmckzwvsgntsudnqfqcudeporevbhgvlkaavdypfiahusbzwcgsigspfpxlqbxhqfzbrrcrgtlcbokvrtrghobknoxcaelnhchwaekpqepimanaqagajdpzbsaoepnkkfrxsavjkpxeawhrhacnnutphuzngxnpzrtujcfwvmhxuskdmmoyjdtrmqvbgehwpfseiyjsmugjpcbndigkufynwnuolasltqxfirxvkqcibculpqbarejhtufquoqzvvpwfszzpsjovmajrfviuozdscztuhavlszbdncrxdsmoikkmxwenfkqoomdzxbvw");
    return internalObjectNode;
}
BridgeInternal.setProperty(internalObjectNode, "mypk", "wiqkclagxrdqclmvuzcsomihrkxbbikwypqrcgrhpgkylztdxxirzwwkmleovdrikggqupfcclpaowyetbywvbeyegniacruvzncxflfzrwnhtdubzrezefqhtyznagotfxtcynnyderhryvbtpoxbuyfkkwsmoydfmglwzgqobraysfidipfsybbgsromwwiuygkuzbitkwzvzuenfjvxkklcjomrasmddllqfiqmgmcbmnmikpvyzbdbuitfjuohwbqfyueqjvbwehgwhosooehtglncoyiundhhqrsbkazhuxjzqoteouwexvzchkhuukpkqkdlkfhhqbgcvrhzizljrkfujebedsfdrvzdvavxrwpkyqvnvbbqvoylvxejetqvzqaakrsvbnilfngjgdavkdwfgxqvhjatrccvsbjpueenjljgouzoqdweckcjjhfiloefucsmloniyklseuztfhkzufbzntnvlaziaqqoayilyxssmfafvawxxnhyyaosnlsujsemzwvpirnbkjtfbggaamzcocdvsebdkxkyfvkakvangpgmrqqctlrbblrmmwchqliypeuuqtbiozgtfjtwjdhhowetaltnlpwqokckxpbfrpyshcygzrxdzhnffjagppnaeyrzuhrofoyurrythnpefgtiybfmckzwvsgntsudnqfqcudeporevbhgvlkaavdypfiahusbzwcgsigspfpxlqbxhqfzbrrcrgtlcbokvrtrghobknoxcaelnhchwaekpqepimanaqagajdpzbsaoepnkkfrxsavjkpxeawhrhacnnutphuzngxnpzrtujcfwvmhxuskdmmoyjdtrmqvbgehwpfseiyjsmugjpcbndigkufynwnuolasltqxfirxvkqcibculpqbarejhtufquoqzvvpwfszzpsjovmajrfviuozdscztuhavlszbdncrxdsmoikkmxwenfkqoomdzxbvw");
// Builds a test document whose id and "mypk" partition-key property share one random UUID.
private InternalObjectNode getInternalObjectNode() {
    final String documentId = UUID.randomUUID().toString();
    final InternalObjectNode node = new InternalObjectNode();
    node.setId(documentId);
    BridgeInternal.setProperty(node, "mypk", documentId);
    return node;
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, 
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); 
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, 
InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); 
assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { 
FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); 
// Tail of validateQueryDiagnostics (its signature is above this chunk): when query
// metrics are enabled the diagnostics string must carry the query-plan timing
// markers; when disabled (else branch) they must be absent.
assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
} else {
    assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
    assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
    assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
}
}

/**
 * Verifies that reading all items of one logical partition surfaces (or suppresses)
 * query metrics in each page's diagnostics and that paging returns exactly the
 * number of items inserted.
 */
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
    String pkValue = UUID.randomUUID().toString();
    // Seed the logical partition with the expected number of documents.
    for (int i = 0; i < expectedItemCount; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue);
        CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    // qmEnabled == null exercises the SDK default for query metrics.
    if (qmEnabled != null) {
        options = options.setQueryMetricsEnabled(qmEnabled);
    }
    // Page size 5 forces several FeedResponses so multiple pages are validated.
    ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
    Iterator<FeedResponse<InternalObjectNode>> iterator = this.container
        .readAllItems(
            new PartitionKey(pkValue),
            options,
            InternalObjectNode.class)
        .iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    int actualItemCount = 0;
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        actualItemCount += feedResponse.getResults().size();
        validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
    }
    assertThat(actualItemCount).isEqualTo(expectedItemCount);
}

/**
 * Reads an item with a deliberately wrong partition key over direct (TCP) transport
 * and asserts the resulting CosmosException carries complete, JSON-valid diagnostics
 * (connection mode, resource address, regions, backend latency, transport timeline).
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnException() {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        createResponse = container.createItem(internalObjectNode);
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        // Wrong partition key -> server responds 404 (NotFound), which we assert on below.
        CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
            new PartitionKey("wrongPartitionKey"),
            InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        // Exception string and message must themselves be valid JSON.
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        assertThat(diagnostics).contains("\"backendLatencyInMs\"");
        isValidJSON(diagnostics);
        validateTransportRequestTimelineDirect(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}

/**
 * Forces the metadata (gateway) HTTP client to fail with a 400 via a mocked
 * HttpClient and asserts the exception diagnostics identify a DocumentCollection
 * metadata request over direct mode. (Method body continues on the next line.)
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnMetadataException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container =
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
        // Every metadata HTTP call fails with a 400 so the create below surfaces it.
        Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class)))
            .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest")));
        // FIX: was "RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ..." — a
        // redundant double assignment; a single initialization is sufficient.
        RxStoreModel rxGatewayStoreModel =
            ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper());
        ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient);
        container.createItem(internalObjectNode);
        fail("request should fail as bad request");
    } catch (CosmosException exception) {
        // Exception string and message must themselves be valid JSON.
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // The failing request is a metadata (collection) lookup, not a document operation.
        assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\"");
        assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        isValidJSON(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}

/**
 * Verifies that the serialized diagnostics cap supplementalResponseStatisticsList
 * at 10 entries even though the in-memory list holds more (15), and that entries
 * carry a recent requestResponseTimeUTC plus operation metadata.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void supplementalResponseStatisticsList() throws Exception {
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext());
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    ObjectMapper objectMapper = new ObjectMapper();
    String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = objectMapper.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // Internal list keeps all 15; the serialized form is capped at 10.
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    // Below the cap (7 entries) the serialized list matches the internal list 1:1.
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    objectMapper = new ObjectMapper();
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
    for(JsonNode node : supplementalResponseStatisticsListNode) {
        assertThat(node.get("storeResult").asText()).isNotNull();
        String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
        Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
        // Each recorded response time must be recent (within 5 seconds of now).
        assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
        assertThat(node.get("requestResponseTimeUTC")).isNotNull();
        // FIX: the requestOperationType assertion was duplicated; one check suffices.
        assertThat(node.get("requestOperationType")).isNotNull();
    }
}

@Test(groups = {"simple"}, timeOut = TIMEOUT) public void
serializationOnVariousScenarios() {
    // Each response type must tag its diagnostics with the serialization step performed.
    CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
    String diagnostics = cosmosDatabase.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
    CosmosContainerResponse containerResponse = this.container.read();
    diagnostics = containerResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
    TestItem testItem = new TestItem();
    testItem.id = "TestId";
    testItem.mypk = "TestPk";
    // Create without an explicit partition key: the SDK must fetch/serialize the PK itself.
    CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    testItem.id = "TestId2";
    testItem.mypk = "TestPk";
    // With an explicit partition key no PK-fetch serialization happens, and item
    // deserialization only appears once getItem() is actually called.
    itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    TestItem readTestItem = itemResponse.getItem();
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
    InternalObjectNode properties = readItemResponse.getItem();
    diagnostics = readItemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
}

/**
 * Verifies that RNTBD (direct TCP) diagnostics report request/response payload and
 * wire lengths consistently for create (201), conflict (409), read and delete.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
    try {
        // Second create of the same id conflicts; 409 responses carry no payload.
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        validate(e.getDiagnostics(), testItemLength, 0);
    }
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

/**
 * Exercises three upserts separated by 1s sleeps on a fresh direct-mode client and
 * validates the RNTBD service-endpoint statistics (creation time, last request
 * times, channel counts) captured in the diagnostics of the third operation.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();
        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        // Sleeps keep the before/after timestamp windows of each operation disjoint.
        Thread.sleep(1000);
        // First operation triggers RNTBD endpoint initialization.
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();
        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3);
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

// Asserts the serviceEndpointStatistics embedded in an RNTBD storeResult: endpoint
// created during operation 1's window, last (successful) request during operation 2's
// window (the statistics are snapshotted before operation 3 completes).
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics =
assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + 
".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); 
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); 
assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
class CosmosDiagnosticsTest extends TestSuiteBase {
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
    // One client per connection mode; both point at the same shared container.
    private CosmosClient gatewayClient;
    private CosmosClient directClient;
    private CosmosAsyncDatabase cosmosAsyncDatabase;
    private CosmosContainer container;
    private CosmosAsyncContainer cosmosAsyncContainer;

    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() {
        // Guard against beforeClass running twice.
        assertThat(this.gatewayClient).isNull();
        gatewayClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .gatewayMode()
            .buildClient();
        directClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
        cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
        container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    }

    @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        if (this.gatewayClient != null) {
            this.gatewayClient.close();
        }
        if (this.directClient != null) {
            this.directClient.close();
        }
    }

    // Query text + qmEnabled flag pairs fed into queryMetrics(...).
    // NOTE(review): the five qmEnabled=false rows appear twice — looks like an
    // accidental duplication of the false block; confirm before deduplicating.
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c where c.id = 'wrongId'", true },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true },
            new Object[] { "Select * from c where c.id = 'wrongId'", false },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId'", false },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
        };
    }

    // {expected item count, qmEnabled} pairs; null exercises the SDK default.
    @DataProvider(name = "readAllItemsOfLogicalPartition")
    private Object[][] readAllItemsOfLogicalPartition() {
        return new Object[][]{
            new Object[] { 1, true },
            new Object[] { 5, null },
            new Object[] { 20, null },
            new Object[] { 1, false },
            new Object[] { 5, false },
            new Object[] { 20, false },
        };
    }

    /**
     * Creates an item over gateway mode and asserts the diagnostics carry gateway
     * statistics, operation metadata, user agent, activity id and the HTTP
     * transport timeline. (Method body continues on the next chunk line.)
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void gatewayDiagnostics() throws Exception {
        CosmosClient testGatewayClient = null;
        try {
            testGatewayClient = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .gatewayMode()
                .buildClient();
            CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
            String diagnostics = createResponse.getDiagnostics().toString();
            assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
            assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
            assertThat(diagnostics).contains("\"operationType\":\"Create\"");
            assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
            assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
            assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
            assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
            assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
            validateTransportRequestTimelineGateway(diagnostics);
            validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient());
            isValidJSON(diagnostics);
        } finally {
            if (testGatewayClient != null) {
                testGatewayClient.close();
            }
        }
    }

    /**
     * Reads an item with a wrong partition key over gateway mode and asserts the
     * resulting 404 CosmosException carries complete, JSON-valid gateway diagnostics.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void gatewayDiagnosticsOnException() throws Exception {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = null;
        try {
            createResponse = this.container.createItem(internalObjectNode);
            CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
            ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
            // Wrong partition key -> 404 (NotFound), asserted in the catch below.
            CosmosItemResponse<InternalObjectNode> readResponse =
                this.container.readItem(BridgeInternal.getProperties(createResponse).getId(),
                    new PartitionKey("wrongPartitionKey"),
                    InternalObjectNode.class);
            fail("request should fail as partition key is wrong");
        } catch (CosmosException exception) {
            // Exception string and message must themselves be valid JSON.
            isValidJSON(exception.toString());
            isValidJSON(exception.getMessage());
            String diagnostics = exception.getDiagnostics().toString();
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
            assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
            assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
            assertThat(diagnostics).contains("\"statusCode\":404");
            assertThat(diagnostics).contains("\"operationType\":\"Read\"");
            assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
            assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
            assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
            assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
            validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient());
            assertThat(exception.getDiagnostics().getDuration()).isNotNull();
            validateTransportRequestTimelineGateway(diagnostics);
            isValidJSON(diagnostics);
        }
    }

    /**
     * Asserts that every diagnostics payload embeds a system-state snapshot
     * (memory and CPU load) alongside the usual user agent and activity id.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void systemDiagnosticsForSystemStateInformation() {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
        String diagnostics = createResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains("systemInformation");
        assertThat(diagnostics).contains("usedMemory");
        assertThat(diagnostics).contains("availableMemory");
        assertThat(diagnostics).contains("systemCpuLoad");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
    }

    /**
     * Creates an item over direct (TCP) mode and asserts the diagnostics carry the
     * RNTBD statistics, metadata lookups, serialization tags and transport timeline.
     * (Method body continues on the next chunk line.)
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void directDiagnostics() throws Exception {
        CosmosClient testDirectClient = null;
        try {
            testDirectClient = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .directMode()
                .buildClient();
            CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            String diagnostics =
createResponse.getDiagnostics().toString();
            assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
            assertThat(diagnostics).contains("supplementalResponseStatisticsList");
            // Direct mode performed no gateway request, so gatewayStatistics is null.
            assertThat(diagnostics).contains("\"gatewayStatistics\":null");
            assertThat(diagnostics).contains("addressResolutionStatistics");
            assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
            assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
            assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
            assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
            assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
            assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
            assertThat(diagnostics).contains("\"backendLatencyInMs\"");
            assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty();
            assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
            validateTransportRequestTimelineDirect(diagnostics);
            validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient());
            isValidJSON(diagnostics);
            // A conflicting create (409) must still report backend latency and timeline.
            try {
                cosmosContainer.createItem(internalObjectNode);
                fail("expected 409");
            } catch (CosmosException e) {
                diagnostics = e.getDiagnostics().toString();
                assertThat(diagnostics).contains("\"backendLatencyInMs\"");
                validateTransportRequestTimelineDirect(e.getDiagnostics().toString());
            }
        } finally {
            if (testDirectClient != null) {
                testDirectClient.close();
            }
        }
    }

    /**
     * Asserts that only the FIRST page of each query carries query-plan timing and
     * request-timeline details in its diagnostics; later pages must not.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanDiagnostics() throws JsonProcessingException {
        CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        List<String> itemIdList = new ArrayList<>();
        // Insert 100 docs, remembering every 20th id for the IN-clause query below.
        for(int i = 0; i< 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if(i%20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        String queryDiagnostics = null;
        List<String> queryList = new ArrayList<>();
        queryList.add("Select * from c");
        // Build "SELECT * from c where c.mypk in ('id1','id2',...)".
        StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
        for(int i = 0 ; i < itemIdList.size(); i++){
            queryBuilder.append("'").append(itemIdList.get(i)).append("'");
            if(i < (itemIdList.size()-1)) {
                queryBuilder.append(",");
            } else {
                queryBuilder.append(")");
            }
        }
        queryList.add(queryBuilder.toString());
        queryList.add("Select * from c where c.id = 'wrongId'");
        for(String query : queryList) {
            int feedResponseCounter = 0;
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setQueryMetricsEnabled(true);
            Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
            while (iterator.hasNext()) {
                FeedResponse<InternalObjectNode> feedResponse = iterator.next();
                queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
                if (feedResponseCounter == 0) {
                    // Query-plan info is only attached to the first page.
                    assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
                    assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
                    assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
                    String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline());
                    assertThat(requestTimeLine).contains("connectionConfigured");
                    assertThat(requestTimeLine).contains("requestSent");
                    assertThat(requestTimeLine).contains("transitTime");
                    assertThat(requestTimeLine).contains("received");
                } else {
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
                    assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline =");
                }
                feedResponseCounter++;
            }
        }
    }

    /**
     * Runs an IN-clause query with index metrics enabled and asserts the index
     * utilization header, when present, decodes to non-null utilized index info.
     * (Method body continues on the next chunk line.)
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryMetricsWithIndexMetrics() {
        CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        List<String> itemIdList = new ArrayList<>();
        // Insert 100 docs, remembering every 20th id for the IN-clause query below.
        for(int i = 0; i< 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if(i%20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        String queryDiagnostics = null;
        List<String> queryList = new ArrayList<>();
        StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
        for(int i = 0 ; i < itemIdList.size(); i++){
            queryBuilder.append("'").append(itemIdList.get(i)).append("'");
            if(i < (itemIdList.size()-1)) {
                queryBuilder.append(",");
            } else {
                queryBuilder.append(")");
            }
        }
        queryList.add(queryBuilder.toString());
        for (String query : queryList) {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setQueryMetricsEnabled(true);
            options.setIndexMetricsEnabled(true);
            Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
            while (iterator.hasNext()) {
                FeedResponse<InternalObjectNode> feedResponse = iterator.next();
                queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
                logger.info("This is query diagnostics {}", queryDiagnostics);
                if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) {
                    assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull();
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = 
testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); } private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) 
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); 
double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new 
CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch 
(CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { 
isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest 
rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = 
itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch 
(CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = 
container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) 
.isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public 
void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = 
(Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return 
internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"decodeTime"); assertThat(diagnostics).contains("\"eventName\":\"received\""); 
assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field 
availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
I forget why these specific checks were added previously. Currently, the decode startTime and endTime will either both exist or both be null, so the extra check has been removed for now.
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } if (response.getDecodeEndTime() != null && response.getDecodeStartTime() != null) { requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage(RntbdRequestRecord.Stage.DECODE_COMPLETED, response.getDecodeEndTime()); } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddress() != null ? 
requestRecord.args().physicalAddress().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, 
responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); requestRecord.completeExceptionally(cause); } }
if (response.getDecodeEndTime() != null && response.getDecodeStartTime() != null) {
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddress() != null ? 
requestRecord.args().physicalAddress().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, 
responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); requestRecord.completeExceptionally(cause); } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase 
= "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. 
*/ private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } RntbdChannelState getChannelState(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); if (this.pendingRequests.size() < limit) { return RntbdChannelState.ok(this.pendingRequests.size()); } if (this.hasRntbdContext()) { return RntbdChannelState.pendingLimit(this.pendingRequests.size()); } else { return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size())); } } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final 
CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. 
*/ private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.trace("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
I was doing some tests with a large payload to check the time spent in the decode stage; I have now reverted it.
/**
 * Builds a minimal test document whose id and "mypk" partition-key property share a freshly
 * generated UUID, so every generated item is unique and lands in its own logical partition.
 * <p>
 * FIX: a leftover ~1 KB hard-coded partition-key payload (a temporary experiment to measure
 * decode time, per the author's note) is reverted back to the UUID value.
 */
private InternalObjectNode getInternalObjectNode() {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", uuid);
    return internalObjectNode;
}
BridgeInternal.setProperty(internalObjectNode, "mypk", "wiqkclagxrdqclmvuzcsomihrkxbbikwypqrcgrhpgkylztdxxirzwwkmleovdrikggqupfcclpaowyetbywvbeyegniacruvzncxflfzrwnhtdubzrezefqhtyznagotfxtcynnyderhryvbtpoxbuyfkkwsmoydfmglwzgqobraysfidipfsybbgsromwwiuygkuzbitkwzvzuenfjvxkklcjomrasmddllqfiqmgmcbmnmikpvyzbdbuitfjuohwbqfyueqjvbwehgwhosooehtglncoyiundhhqrsbkazhuxjzqoteouwexvzchkhuukpkqkdlkfhhqbgcvrhzizljrkfujebedsfdrvzdvavxrwpkyqvnvbbqvoylvxejetqvzqaakrsvbnilfngjgdavkdwfgxqvhjatrccvsbjpueenjljgouzoqdweckcjjhfiloefucsmloniyklseuztfhkzufbzntnvlaziaqqoayilyxssmfafvawxxnhyyaosnlsujsemzwvpirnbkjtfbggaamzcocdvsebdkxkyfvkakvangpgmrqqctlrbblrmmwchqliypeuuqtbiozgtfjtwjdhhowetaltnlpwqokckxpbfrpyshcygzrxdzhnffjagppnaeyrzuhrofoyurrythnpefgtiybfmckzwvsgntsudnqfqcudeporevbhgvlkaavdypfiahusbzwcgsigspfpxlqbxhqfzbrrcrgtlcbokvrtrghobknoxcaelnhchwaekpqepimanaqagajdpzbsaoepnkkfrxsavjkpxeawhrhacnnutphuzngxnpzrtujcfwvmhxuskdmmoyjdtrmqvbgehwpfseiyjsmugjpcbndigkufynwnuolasltqxfirxvkqcibculpqbarejhtufquoqzvvpwfszzpsjovmajrfviuozdscztuhavlszbdncrxdsmoikkmxwenfkqoomdzxbvw");
/**
 * Creates a test document whose id and "mypk" partition-key property are set to the same
 * freshly generated UUID value.
 */
private InternalObjectNode getInternalObjectNode() {
    final String uuid = UUID.randomUUID().toString();
    final InternalObjectNode item = new InternalObjectNode();
    item.setId(uuid);
    BridgeInternal.setProperty(item, "mypk", uuid);
    return item;
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, 
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); 
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, 
InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); 
assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { 
FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); 
assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = 
null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = 
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext()); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); 
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
serializationOnVariousScenarios() {
    // NOTE(review): continuation — the "@Test ... public void" prefix sits at the end of the
    // previous line of this chunk. Verifies that each operation tags its diagnostics with the
    // expected serializationType marker, and that lazy item deserialization (getItem) is only
    // recorded once it actually happens.

    // database read
    CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
    String diagnostics = cosmosDatabase.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");

    // container read
    CosmosContainerResponse containerResponse = this.container.read();
    diagnostics = containerResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");

    // create without an explicit partition key: the SDK must fetch/serialize the PK itself
    TestItem testItem = new TestItem();
    testItem.id = "TestId";
    testItem.mypk = "TestPk";
    CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");

    // create with an explicit partition key: no PK fetch, and no item deserialization yet
    testItem.id = "TestId2";
    testItem.mypk = "TestPk";
    itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    // calling getItem() triggers the lazy item deserialization, which must then be recorded
    TestItem readTestItem = itemResponse.getItem();
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");

    CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
    // getItem() again triggers deserialization before the diagnostics are inspected
    InternalObjectNode properties = readItemResponse.getItem();
    diagnostics = readItemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
}

/**
 * Verifies the RNTBD request/response payload-length statistics reported in direct-mode
 * diagnostics for create (success and 409), read and delete operations.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    // serialized size of the item — lower bound for the wire request length
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());

    // create
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength,  ModelBridgeInternal.getPayloadLength(createItemResponse));

    // creating the same item again fails with 409; the request still carried the payload
    try {
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        // no response payload is expected on the conflict
        validate(e.getDiagnostics(), testItemLength, 0);
    }

    // read: no request payload
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));

    // delete: neither request nor response payload
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

/**
 * Exercises the RNTBD service-endpoint statistics: three upserts separated by 1s sleeps give
 * distinct time windows so that createdTime / lastRequestTime / lastSuccessfulRequestTime in
 * the third operation's diagnostics can be bracketed against wall-clock instants.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdStatistics() throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode()
            .buildClient();

        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        Thread.sleep(1000);

        // the first operation triggers the lazy RNTBD service-endpoint initialization
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();

        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();

        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();

        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3);

        // read
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));

        // delete
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

// Asserts the RNTBD store-result and service-endpoint statistics of the third operation's
// diagnostics against the instants captured around the earlier operations.
// NOTE(review): this method continues on the next line of this chunk.
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    // NOTE(review): computed but not used in this method — presumably left over; verify intent
    boolean hasPayload = storeResult.get("exception").isNull();
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics =
storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(1); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); 
assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + 
".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); 
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); 
assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false 
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); 
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = 
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan 
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = 
    testcontainer.queryItems(query, options, InternalObjectNode.class);
        Set<String> partitionKeyRangeIds = new HashSet<>();
        Set<String> pkRids = new HashSet<>();
        cosmosPagedFlux.byPage().flatMap(feedResponse -> {
            String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
            // Collect every "partitionKeyRangeId" value reported in the per-page diagnostics.
            Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
            Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
            while (matcher.find()) {
                String group = matcher.group(2);
                partitionKeyRangeIds.add(group);
            }
            // Collect every "pkrId:" the query engine reports having contacted.
            pattern = Pattern.compile("(pkrId:)(\\d)");
            matcher = pattern.matcher(cosmosDiagnosticsString);
            while (matcher.find()) {
                String group = matcher.group(2);
                pkRids.add(group);
            }
            return Flux.just(feedResponse);
        }).blockLast();
        // The set of ranges seen in diagnostics must equal the set of contacted pkrIds.
        assertThat(pkRids).isNotEmpty();
        assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
        deleteCollection(testcontainer);
    }

    // Asserts the diagnostics fields expected for direct-mode (RNTBD) query requests.
    private void validateDirectModeQueryDiagnostics(String diagnostics) {
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).contains("supplementalResponseStatisticsList");
        assertThat(diagnostics).contains("responseStatisticsList");
        assertThat(diagnostics).contains("\"gatewayStatistics\":null");
        assertThat(diagnostics).contains("addressResolutionStatistics");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    }

    // Asserts the diagnostics fields expected for gateway-mode query requests.
    private void validateGatewayModeQueryDiagnostics(String diagnostics) {
        assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
        assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
        assertThat(diagnostics).contains("\"operationType\":\"Query\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(diagnostics).contains("\"regionsContacted\"");
    }

    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); 
double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new 
    CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options = options.setQueryMetricsEnabled(qmEnabled);
        }
        // Page size 5 forces multiple pages for the larger logical partitions in the provider.
        ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
        Iterator<FeedResponse<InternalObjectNode>> iterator = this.container
            .readAllItems(
                new PartitionKey(pkValue),
                options,
                InternalObjectNode.class)
            .iterableByPage().iterator();
        assertThat(iterator.hasNext()).isTrue();
        int actualItemCount = 0;
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            actualItemCount += feedResponse.getResults().size();
            // readAllItems issues no query-plan call, hence expectQueryPlanDiagnostics=false.
            validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
        }
        assertThat(actualItemCount).isEqualTo(expectedItemCount);
    }

    // Reads an item with a deliberately wrong partition key in direct mode and validates the
    // diagnostics attached to the resulting 404.
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void directDiagnosticsOnException() throws Exception {
        CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = null;
        CosmosClient client = null;
        try {
            client = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .directMode()
                .buildClient();
            CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
            createResponse = container.createItem(internalObjectNode);
            CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
            ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
            CosmosItemResponse<InternalObjectNode> readResponse =
                cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
                    new PartitionKey("wrongPartitionKey"),
                    InternalObjectNode.class);
            fail("request should fail as partition key is wrong");
        } catch
(CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { 
isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest 
rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = 
itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch 
(CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = 
container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) 
.isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public 
void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = 
(Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return 
internalObjectNode; } // closes getInternalObjectNode(String pkValue)

// Reads the private "supplementalResponseStatisticsList" field out of a
// ClientSideRequestStatistics instance via reflection so tests can inspect it.
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
    return list;
}

// Resets the same private list to a fresh empty ArrayList via reflection.
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}

// Asserts that the gateway-mode transport timeline events appear in the diagnostics JSON string.
private void validateTransportRequestTimelineGateway(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    // NOTE(review): the next line duplicates the assertion above — it was probably
    // meant to check a different timeline event; confirm which event is missing.
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}

// Asserts that the direct-mode (RNTBD) transport timeline events appear in the
// diagnostics JSON string. The method body continues past this chunk line with
// the "completed", startTimeUTC and durationInMicroSec checks.
private void validateTransportRequestTimelineDirect(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"created\"");
    assertThat(diagnostics).contains("\"eventName\":\"queued\"");
    assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\"");
    assertThat(diagnostics).contains("\"eventName\":\"pipelined\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    // NOTE(review): no closing \" after decodeTime — this matches a prefix only;
    // confirm whether that is intentional (e.g. to tolerate a suffix in the event name).
    assertThat(diagnostics).contains("\"eventName\":\"decodeTime");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
    assertThat(diagnostics).contains("\"eventName\":\"completed\"");
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMicroSec\"");
} // closes validateTransportRequestTimelineDirect

// Fails the current test if the given string is not parseable JSON.
public void isValidJSON(final String json) {
    try {
        final JsonParser parser = new ObjectMapper().createParser(json);
        // Consume the full token stream; any structural error raises IOException.
        while (parser.nextToken() != null) { }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}

// Builds an HttpClient for the address cache; when fakeProxy is true it routes
// through localhost:8888 where nothing listens, so address-resolution requests
// fail with "connection refused" (used to exercise error diagnostics).
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig httpClientConfig;
    if(fakeProxy) {
        httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    } else {
        httpClientConfig = new HttpClientConfig(new Configs());
    }
    return HttpClient.createFixed(httpClientConfig);
}

// Deserializes an IndexUtilizationInfo from JSON; returns null (after logging
// the parse error) when the input is malformed.
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
    IndexUtilizationInfo indexUtilizationInfo = null;
    try {
        indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
    }
    return indexUtilizationInfo;
}

// Extracts the (single) write region name from the client's LocationCache via
// reflection and asserts the diagnostics report exactly that contacted region.
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
    RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
    GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
    Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
    locationInfoField.setAccessible(true);
    Object locationInfo = locationInfoField.get(locationCache);
    // DatabaseAccountLocationsInfo is a private nested class, hence Class.forName.
    Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo");
    Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation");
    availableWriteEndpointByLocation.setAccessible(true);
    @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
    String regionName = map.keySet().iterator().next();
    assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
    // NOTE(review): toLowerCase() uses the default locale; Locale.ROOT would be
    // safer since this file also runs assertions under Locale.GERMAN — confirm.
    assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}

// Minimal POJO used as the document payload in the RNTBD diagnostics tests.
public static class TestItem {
    public String id;   // document id
    public String mypk; // partition-key field
    public TestItem() { }
}
} // closes CosmosDiagnosticsTest
There was a test case to make sure all the stages show up in the diagnostics. I have updated that method accordingly.
private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", "wiqkclagxrdqclmvuzcsomihrkxbbikwypqrcgrhpgkylztdxxirzwwkmleovdrikggqupfcclpaowyetbywvbeyegniacruvzncxflfzrwnhtdubzrezefqhtyznagotfxtcynnyderhryvbtpoxbuyfkkwsmoydfmglwzgqobraysfidipfsybbgsromwwiuygkuzbitkwzvzuenfjvxkklcjomrasmddllqfiqmgmcbmnmikpvyzbdbuitfjuohwbqfyueqjvbwehgwhosooehtglncoyiundhhqrsbkazhuxjzqoteouwexvzchkhuukpkqkdlkfhhqbgcvrhzizljrkfujebedsfdrvzdvavxrwpkyqvnvbbqvoylvxejetqvzqaakrsvbnilfngjgdavkdwfgxqvhjatrccvsbjpueenjljgouzoqdweckcjjhfiloefucsmloniyklseuztfhkzufbzntnvlaziaqqoayilyxssmfafvawxxnhyyaosnlsujsemzwvpirnbkjtfbggaamzcocdvsebdkxkyfvkakvangpgmrqqctlrbblrmmwchqliypeuuqtbiozgtfjtwjdhhowetaltnlpwqokckxpbfrpyshcygzrxdzhnffjagppnaeyrzuhrofoyurrythnpefgtiybfmckzwvsgntsudnqfqcudeporevbhgvlkaavdypfiahusbzwcgsigspfpxlqbxhqfzbrrcrgtlcbokvrtrghobknoxcaelnhchwaekpqepimanaqagajdpzbsaoepnkkfrxsavjkpxeawhrhacnnutphuzngxnpzrtujcfwvmhxuskdmmoyjdtrmqvbgehwpfseiyjsmugjpcbndigkufynwnuolasltqxfirxvkqcibculpqbarejhtufquoqzvvpwfszzpsjovmajrfviuozdscztuhavlszbdncrxdsmoikkmxwenfkqoomdzxbvw"); return internalObjectNode; }
BridgeInternal.setProperty(internalObjectNode, "mypk", "wiqkclagxrdqclmvuzcsomihrkxbbikwypqrcgrhpgkylztdxxirzwwkmleovdrikggqupfcclpaowyetbywvbeyegniacruvzncxflfzrwnhtdubzrezefqhtyznagotfxtcynnyderhryvbtpoxbuyfkkwsmoydfmglwzgqobraysfidipfsybbgsromwwiuygkuzbitkwzvzuenfjvxkklcjomrasmddllqfiqmgmcbmnmikpvyzbdbuitfjuohwbqfyueqjvbwehgwhosooehtglncoyiundhhqrsbkazhuxjzqoteouwexvzchkhuukpkqkdlkfhhqbgcvrhzizljrkfujebedsfdrvzdvavxrwpkyqvnvbbqvoylvxejetqvzqaakrsvbnilfngjgdavkdwfgxqvhjatrccvsbjpueenjljgouzoqdweckcjjhfiloefucsmloniyklseuztfhkzufbzntnvlaziaqqoayilyxssmfafvawxxnhyyaosnlsujsemzwvpirnbkjtfbggaamzcocdvsebdkxkyfvkakvangpgmrqqctlrbblrmmwchqliypeuuqtbiozgtfjtwjdhhowetaltnlpwqokckxpbfrpyshcygzrxdzhnffjagppnaeyrzuhrofoyurrythnpefgtiybfmckzwvsgntsudnqfqcudeporevbhgvlkaavdypfiahusbzwcgsigspfpxlqbxhqfzbrrcrgtlcbokvrtrghobknoxcaelnhchwaekpqepimanaqagajdpzbsaoepnkkfrxsavjkpxeawhrhacnnutphuzngxnpzrtujcfwvmhxuskdmmoyjdtrmqvbgehwpfseiyjsmugjpcbndigkufynwnuolasltqxfirxvkqcibculpqbarejhtufquoqzvvpwfszzpsjovmajrfviuozdscztuhavlszbdncrxdsmoikkmxwenfkqoomdzxbvw");
/**
 * Convenience factory: builds a test document whose {@code id} and
 * {@code "mypk"} partition-key field share a single random UUID value.
 *
 * @return the freshly constructed document
 */
private InternalObjectNode getInternalObjectNode() {
    final InternalObjectNode document = new InternalObjectNode();
    final String generatedId = UUID.randomUUID().toString();
    document.setId(generatedId);
    BridgeInternal.setProperty(document, "mypk", generatedId);
    return document;
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, 
new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); 
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotNull(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void 
systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, 
InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); 
assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { 
FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); 
assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = 
// Body of directDiagnosticsOnException(): create an item through a dedicated direct-mode
// client, then read it with the wrong partition key; the 404 CosmosException must carry
// DIRECT-mode diagnostics that are valid JSON with contacted regions, duration, backend
// latency and a full direct transport timeline. The temporary client is closed in 'finally'.
null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); } finally { if (client != null) { client.close(); } } }
// Forces a metadata (gateway) failure and checks the diagnostics on the resulting exception
// (method body continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container =
// Body of directDiagnosticsOnMetadataException(): swap the gateway HttpClient for a Mockito
// mock that always answers with a 400, so the metadata (DocumentCollection) lookup fails and
// the exception diagnostics must report resourceType DocumentCollection in DIRECT mode.
// NOTE(review): 'RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ...' is a redundant
// double assignment (legal Java, but the inner 'rxGatewayStoreModel =' looks like a typo).
client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } }
// Records 15 responses and checks how the supplemental statistics list is serialized
// (method body continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext()); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
// Body of supplementalResponseStatisticsList(): with 15 recorded entries the serialized
// "supplementalResponseStatisticsList" is capped at 10; after clearing and recording 7,
// all 7 are serialized, each with a storeResult and a requestResponseTimeUTC within 5s of now.
// NOTE(review): the 'requestOperationType' isNotNull assertion is duplicated verbatim.
ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } }
// Checks the serializationType recorded in diagnostics per operation kind
// (method body continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void
// Body of serializationOnVariousScenarios(): database/container reads record
// *_DESERIALIZATION markers; createItem without an explicit PartitionKey records
// PARTITION_KEY_FETCH_SERIALIZATION; ITEM_DESERIALIZATION only appears after getItem()
// is actually called (lazy deserialization).
serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); }
// Verifies RNTBD request/response payload length bookkeeping for create/conflict/read/delete
// (method body continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem
// Body of rntbdRequestResponseLengthStatistics(): create carries the item payload in both
// directions; the 409 (duplicate create) carries the request payload but no response payload;
// read carries only a response payload; delete carries neither.
testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); }
// Exercises RNTBD service-endpoint statistics: three upserts separated by 1s sleeps so the
// endpoint created/lastRequest timestamps can be bracketed (continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant
// Tail of rntbdStatistics(): run operations 2 and 3 with timestamp brackets, then validate
// the service endpoint statistics on operation 3's diagnostics and clean up the test item.
// The dedicated client1 is closed via LifeCycleUtils in 'finally'.
beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } }
// Parses the diagnostics JSON and asserts RNTBD channel/endpoint statistics against the
// timestamp brackets captured by rntbdStatistics() (continues on the next line).
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics =
// Tail of validateRntbdStatistics(): endpoint must be open with available channels, its
// createdTime must fall inside the [beforeInit-1ms, afterInit+1ms] bracket, and both
// lastRequestTime / lastSuccessfulRequestTime inside the operation-2 bracket (the +/-1ms
// widening absorbs clock granularity).
// NOTE(review): 'hasPayload' computed on the previous line is never used in this validator.
storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(1); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); }
// Asserts RNTBD request/response payload lengths in the first storeResult of the diagnostics;
// responsePayloadLengthInBytes is only checked when the store call succeeded
// (continues on the next line).
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull();
// Tail of validate(): RNTBD wire lengths must strictly exceed the bare payload sizes
// (they include protocol framing), while the payload length fields match exactly.
// NOTE(review): the first 'rntbdRequestLengthInBytes' assertion is duplicated verbatim.
assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); }
// Emulator-only: creates its own database/container, writes an item, and asserts address
// resolution statistics appear with no error on the happy path (continues on the next lines).
@Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" +
// Middle of addressResolutionStatistics(): a second client's GatewayAddressCache is rewired
// (via reflection) to an HttpClient pointing at a dead local proxy so address resolution
// fails; a background thread restores a working HttpClient after 5s so the read eventually
// succeeds - its diagnostics should then record the transient connection-refused error.
// NOTE(review): assumes port 8888 on localhost is unused - TODO confirm on CI hosts.
".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null");
// Tail of addressResolutionStatistics(): the read's diagnostics must contain the transient
// connection-refused error message; clean up database and both clients in 'finally'.
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } }
// Builds a test document with a random id and the supplied partition-key value.
private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; }
// Reflection accessor for the private supplementalResponseStatisticsList field.
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; }
// Reflection mutator: resets the private supplementalResponseStatisticsList to empty.
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); }
// Asserts the gateway transport timeline events are present in the diagnostics string.
// NOTE(review): the 'connectionConfigured' assertion is duplicated verbatim.
private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
// Remaining gateway timeline events for validateTransportRequestTimelineGateway(...).
assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); }
// Asserts the full direct (RNTBD) transport timeline, including start time and duration.
private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); }
// Fails the test if the given string is not parseable JSON (drains the token stream).
public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } }
// Builds an HttpClient; with fakeProxy=true it points at localhost:8888 (expected dead) so
// requests fail with connection-refused, which addressResolutionStatistics() relies on.
private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); }
// Minimal serializable test document: public fields on purpose for Jackson binding.
public static class TestItem { public String id; public String mypk; public TestItem() { } } }
// Diagnostics test suite: one shared gateway-mode and one direct-mode client are created per
// class run against the shared multi-partition container, and closed in afterClass.
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer;
// One-time setup: builds both clients and resolves the shared container handles.
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); }
// Tear-down: close both shared clients (alwaysRun so partial setup is still cleaned up).
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } }
// Data provider: {query string, qmEnabled flag} pairs covering top/order-by/group-by/distinct
// (continues on the next line).
@DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false
// Remaining "query" provider rows - the qmEnabled=false set appears twice, presumably to run
// each disabled case two times. NOTE(review): confirm the duplication is intentional.
}, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; }
// Data provider: {expectedItemCount, qmEnabled} pairs (qmEnabled null = SDK default).
@DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; }
// Creates an item through a dedicated gateway-mode client and asserts the GATEWAY
// diagnostics content and transport timeline (continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
// Tail of gatewayDiagnostics(): serialization marker, user agent, activityId pattern, region
// contacted, and JSON validity; the temporary gateway client is closed in 'finally'.
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } }
// Reads with a wrong partition key via the shared gateway container and asserts the 404
// exception's GATEWAY diagnostics (continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" +
// Tail of gatewayDiagnosticsOnException().
// NOTE(review): the catch block dereferences 'createResponse' - this would NPE if the
// initial createItem itself threw; consider guarding or asserting createResponse non-null.
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } }
// Asserts host system metrics (memory, CPU) are embedded in the create diagnostics.
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); }
// Creates an item through a dedicated direct-mode client and asserts the DIRECT diagnostics
// content, including metadata lookups and transport timeline (continues on the next line).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics =
// Tail of directDiagnostics(): DIRECT-mode markers (null gatewayStatistics, metadata lookup
// names, backend latency, timeline), then a second duplicate create to check that the 409
// conflict path also carries backend latency and a full direct timeline.
createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } }
// Seeds 100 items (sampling every 20th id) and checks QueryPlan timing markers appear only on
// the first page of each query's diagnostics (continues on the next lines).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode();
// Body of queryPlanDiagnostics(): builds three queries (select-all, IN-list over sampled ids,
// no-match) and asserts QueryPlan Start/End/Duration plus the gateway request timeline appear
// on the first feed page only; later pages must not repeat the query-plan section.
// NOTE(review): the IN-list is built from item *ids* but filters on c.mypk - assumes mypk is
// set to the id for these docs, TODO confirm against getInternalObjectNode().
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan
Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); 
assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = 
testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); } private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) 
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); 
double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new 
CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch 
(CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { 
isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest 
rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = 
itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch 
(CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = 
container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) 
.isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public 
void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = 
(Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return 
internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"decodeTime"); assertThat(diagnostics).contains("\"eventName\":\"received\""); 
assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field 
availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
I assume the `prefetch` field used here corresponds to the `preFetch` value in your demo code — can you confirm that is the case?
/**
 * Computes the number of credits to place on the AMQP link to satisfy the downstream demand.
 *
 * <p>Returns 0 when there is no subscriber or no outstanding request; returns {@code prefetch}
 * when demand is unbounded ({@code Long.MAX_VALUE}); otherwise returns the outstanding request
 * minus the number of messages already buffered locally, clamped to a non-negative int.</p>
 *
 * @return The number of credits to add; never negative.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No subscriber yet, or no outstanding demand: nothing to fetch.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: top the link up to the configured prefetch.
        credits = prefetch;
    } else {
        // Bounded demand: outstanding request minus messages already queued locally.
        // Clamp to [0, Integer.MAX_VALUE]. The previous Long#intValue() narrowing could
        // truncate a request larger than Integer.MAX_VALUE to an arbitrary (possibly
        // negative) int and starve the link of credits.
        final long remaining = request - messageQueue.size();
        credits = (int) Math.max(0, Math.min(remaining, Integer.MAX_VALUE));
    }
    return credits;
}
credits = prefetch;
/**
 * Computes the number of credits to place on the AMQP link to satisfy the downstream demand.
 *
 * <p>Returns 0 when there is no subscriber or no outstanding request; returns {@code prefetch}
 * when demand is unbounded ({@code Long.MAX_VALUE}); otherwise returns the outstanding request
 * minus the number of messages already buffered locally, clamped to a non-negative int.</p>
 *
 * @return The number of credits to add; never negative.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No subscriber yet, or no outstanding demand: nothing to fetch.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: top the link up to the configured prefetch.
        credits = prefetch;
    } else {
        // Bounded demand: outstanding request minus messages already queued locally.
        // Clamp to [0, Integer.MAX_VALUE]. The previous Long#intValue() narrowing could
        // truncate a request larger than Integer.MAX_VALUE to an arbitrary (possibly
        // negative) int and starve the link of credits.
        final long remaining = request - messageQueue.size();
        credits = (int) Math.max(0, Math.min(remaining, Integer.MAX_VALUE));
    }
    return credits;
}
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
That's correct.
/**
 * Computes the number of credits to place on the AMQP link to satisfy the downstream demand.
 *
 * <p>Returns 0 when there is no subscriber or no outstanding request; returns {@code prefetch}
 * when demand is unbounded ({@code Long.MAX_VALUE}); otherwise returns the outstanding request
 * minus the number of messages already buffered locally, clamped to a non-negative int.</p>
 *
 * @return The number of credits to add; never negative.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No subscriber yet, or no outstanding demand: nothing to fetch.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: top the link up to the configured prefetch.
        credits = prefetch;
    } else {
        // Bounded demand: outstanding request minus messages already queued locally.
        // Clamp to [0, Integer.MAX_VALUE]. The previous Long#intValue() narrowing could
        // truncate a request larger than Integer.MAX_VALUE to an arbitrary (possibly
        // negative) int and starve the link of credits.
        final long remaining = request - messageQueue.size();
        credits = (int) Math.max(0, Math.min(remaining, Integer.MAX_VALUE));
    }
    return credits;
}
credits = prefetch;
/**
 * Computes the number of credits to place on the AMQP link to satisfy the downstream demand.
 *
 * <p>Returns 0 when there is no subscriber or no outstanding request; returns {@code prefetch}
 * when demand is unbounded ({@code Long.MAX_VALUE}); otherwise returns the outstanding request
 * minus the number of messages already buffered locally, clamped to a non-negative int.</p>
 *
 * @return The number of credits to add; never negative.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No subscriber yet, or no outstanding demand: nothing to fetch.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: top the link up to the configured prefetch.
        credits = prefetch;
    } else {
        // Bounded demand: outstanding request minus messages already queued locally.
        // Clamp to [0, Integer.MAX_VALUE]. The previous Long#intValue() narrowing could
        // truncate a request larger than Integer.MAX_VALUE to an arbitrary (possibly
        // negative) int and starve the link of credits.
        final long remaining = request - messageQueue.size();
        credits = (int) Math.max(0, Math.min(remaining, Integer.MAX_VALUE));
    }
    return credits;
}
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/
// Asynchronously closes the given receive link; no-op when the link is null.
// Any failure to initiate the close is logged and swallowed — disposal is best-effort.
private void disposeReceiver(AmqpReceiveLink link) {
    if (link == null) {
        return;
    }
    try {
        // Fire-and-forget: the close operation's completion is not awaited here.
        // NOTE(review): assumes AmqpReceiveLink implements AsyncCloseable; a failing
        // cast would be caught and logged by the catch block below.
        ((AsyncCloseable) link).closeAsync().subscribe();
    } catch (Exception error) {
        logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(),
            link.getEntityPath(), error);
    }
}
}
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/
// Asynchronously closes the given receive link; no-op when the link is null.
// Any failure to initiate the close is logged and swallowed — disposal is best-effort.
private void disposeReceiver(AmqpReceiveLink link) {
    if (link == null) {
        return;
    }
    try {
        // Fire-and-forget: the close operation's completion is not awaited here.
        // NOTE(review): assumes AmqpReceiveLink implements AsyncCloseable; a failing
        // cast would be caught and logged by the catch block below.
        ((AsyncCloseable) link).closeAsync().subscribe();
    } catch (Exception error) {
        logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(),
            link.getEntityPath(), error);
    }
}
}
The JavaDoc for this method should be updated to reflect this behavior. Apparently, this behavior of adding just 1 credit at a time is similar to what Track 1 did. There may have been a reason for doing it this way that @conniey may be aware of.
/**
 * Gets the number of credits to add to the link based on the downstream subscriber's
 * outstanding request and the number of messages already buffered in {@code messageQueue}.
 *
 * @return The number of credits to add; 0 when there is no downstream subscriber or no
 *     outstanding request.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No one to publish to, or no demand signalled yet; do not prefetch anything.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: fall back to the configured prefetch window.
        credits = prefetch;
    } else {
        // Clamp the request to int range BEFORE subtracting. The previous
        // Long.valueOf(request).intValue() narrowing truncates requests larger than
        // Integer.MAX_VALUE and could yield 0 or negative credits despite large demand.
        final int boundedRequest = (int) Math.min(request, Integer.MAX_VALUE);
        credits = Math.max(boundedRequest - messageQueue.size(), 0);
    }
    return credits;
}
credits = prefetch;
/**
 * Gets the number of credits to add to the link based on the downstream subscriber's
 * outstanding request and the number of messages already buffered in {@code messageQueue}.
 *
 * @return The number of credits to add; 0 when there is no downstream subscriber or no
 *     outstanding request.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No one to publish to, or no demand signalled yet; do not prefetch anything.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: fall back to the configured prefetch window.
        credits = prefetch;
    } else {
        // Clamp the request to int range BEFORE subtracting. The previous
        // Long.valueOf(request).intValue() narrowing truncates requests larger than
        // Integer.MAX_VALUE and could yield 0 or negative credits despite large demand.
        final int boundedRequest = (int) Math.min(request, Integer.MAX_VALUE);
        credits = Math.max(boundedRequest - messageQueue.size(), 0);
    }
    return credits;
}
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
Also, have we measured the perf improvement with this change and has this made the client less chatty?
private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long request = REQUESTED.get(this); final int credits; if (subscriber == null || request == 0) { credits = 0; } else if (request == Long.MAX_VALUE) { credits = prefetch; } else { final int remaining = Long.valueOf(request).intValue() - messageQueue.size(); credits = Math.max(remaining, 0); } return credits; }
credits = prefetch;
private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long request = REQUESTED.get(this); final int credits; if (subscriber == null || request == 0) { credits = 0; } else if (request == Long.MAX_VALUE) { credits = prefetch; } else { final int remaining = Long.valueOf(request).intValue() - messageQueue.size(); credits = Math.max(remaining, 0); } return credits; }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
Another question is: what's the theoretical max number of messages in the cache when we add credits in hundreds? For instance, what happens if the cache already has a lot of messages and we add a few hundred credits after each message is consumed?
private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long request = REQUESTED.get(this); final int credits; if (subscriber == null || request == 0) { credits = 0; } else if (request == Long.MAX_VALUE) { credits = prefetch; } else { final int remaining = Long.valueOf(request).intValue() - messageQueue.size(); credits = Math.max(remaining, 0); } return credits; }
credits = prefetch;
private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long request = REQUESTED.get(this); final int credits; if (subscriber == null || request == 0) { credits = 0; } else if (request == Long.MAX_VALUE) { credits = prefetch; } else { final int remaining = Long.valueOf(request).intValue() - messageQueue.size(); credits = Math.max(remaining, 0); } return credits; }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
> srnagar Also, have we measured the perf improvement with this change and has this made the client less chatty? @srnagar, yep, I've used the `receiveevents` test in the perf framework, which exercises the no-backpressure code path. The performance of T2 in this code path improved drastically, and T1 and T2 are very close [when there is no backpressure]. Below is the result of perf testing this specific code path with a prefetch of 100 and a count of 1800 (the test is the same as the `receiveevents` perf test; I slightly modified it to be more debuggable and print extra info). [Executed 5 times] ``` TRACK_1: Start-- Took 14 seconds to receive 1800 events TRACK_1: End-- TRACK_2: Start-- Took 9 seconds to receive 1800 events TRACK_2: End-- TRACK_1: Start-- Took 5 seconds to receive 1800 events TRACK_1: End-- TRACK_2: Start-- Took 5 seconds to receive 1800 events TRACK_2: End-- TRACK_1: Start-- Took 5 seconds to receive 1800 events TRACK_1: End-- TRACK_2: Start-- Took 5 seconds to receive 1800 events TRACK_2: End-- TRACK_1: Start-- Took 5 seconds to receive 1800 events TRACK_1: End-- TRACK_2: Start-- Took 12 seconds to receive 1800 events TRACK_2: End-- TRACK_1: Start-- Took 8 seconds to receive 1800 events TRACK_1: End-- TRACK_2: Start-- Took 7 seconds to receive 1800 events TRACK_2: End-- ```
/**
 * Gets the number of credits to add based on the downstream subscriber's outstanding request
 * and the number of messages already buffered in {@code messageQueue}.
 *
 * @return The number of credits to add; {@code 0} if there is no subscriber or no demand.
 */
private int getCreditsToAdd() {
    // Snapshot of the downstream subscriber; null means nobody is consuming yet.
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    // Reactive Streams demand accumulated via request(long).
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No subscriber or no demand: do not put credits on the link.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: fall back to the configured prefetch.
        credits = prefetch;
    } else {
        // Outstanding demand minus what is already queued locally.
        // NOTE(review): Long.valueOf(request).intValue() truncates for requests larger than
        // Integer.MAX_VALUE — presumably bounded requests stay small in practice; confirm.
        final int remaining = Long.valueOf(request).intValue() - messageQueue.size();
        credits = Math.max(remaining, 0);
    }
    return credits;
}
credits = prefetch;
/**
 * Determines the number of credits to request on the AMQP link.
 *
 * <p>Zero when no subscriber is attached or nothing has been requested; the configured
 * {@code prefetch} for an unbounded request; otherwise the unmet portion of the bounded
 * request after accounting for messages already in the local queue.</p>
 *
 * @return Credits to add, never negative.
 */
private int getCreditsToAdd() {
    final CoreSubscriber<? super Message> consumer = downstream.get();
    final long pending = REQUESTED.get(this);
    if (consumer == null || pending == 0L) {
        return 0;
    } else if (pending == Long.MAX_VALUE) {
        // Downstream asked for everything; use prefetch as the batch size.
        return prefetch;
    } else {
        // Unmet demand = requested - locally buffered, floored at zero.
        return Math.max((int) pending - messageQueue.size(), 0);
    }
}
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
> srnagar .... Apparently, this behavior of adding just 1 credit at a time is similar to what Track 1 did. There may have been a reason for doing it this way that @conniey may be aware of. Connie and I revisited that Track 1 code: Track 1 has a special code path that adds 1 credit at a time, but it only accumulates those credits locally. Once the accumulated credits hit a cap, the actual AMQP Flow frame is sent. So Track 1 won't send one Flow frame for each credit.
private int getCreditsToAdd() {
    // Computes how many link credits to issue so the AMQP link can satisfy the
    // current downstream demand without over-fetching.
    final CoreSubscriber<? super Message> subscriber = downstream.get();
    final long request = REQUESTED.get(this);
    final int credits;
    if (subscriber == null || request == 0) {
        // No subscriber attached yet, or no outstanding demand: issue nothing.
        credits = 0;
    } else if (request == Long.MAX_VALUE) {
        // Unbounded demand: fall back to the configured prefetch window.
        credits = prefetch;
    } else {
        // Bounded demand: subtract messages already buffered locally, floored at zero.
        // NOTE(review): intValue() truncates requests above Integer.MAX_VALUE --
        // verify callers never request that much, or clamp with Math.min instead.
        final int remaining = Long.valueOf(request).intValue() - messageQueue.size();
        credits = Math.max(remaining, 0);
    }
    return credits;
}
credits = prefetch;
private int getCreditsToAdd() {
    // Determines the number of credits the receive link needs in order to meet
    // downstream demand, given what is already queued locally.
    final long demanded = REQUESTED.get(this);
    final boolean hasSubscriber = downstream.get() != null;

    final int result;
    if (!hasSubscriber || demanded == 0) {
        // Nothing to feed, or no demand yet.
        result = 0;
    } else if (demanded == Long.MAX_VALUE) {
        // Unbounded demand maps to the prefetch window.
        result = prefetch;
    } else {
        // Demand minus what is already buffered, floored at zero.
        result = Math.max((int) demanded - messageQueue.size(), 0);
    }
    return result;
}
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
> YijunXieMS Another question is, what's the theoretical max number of messages in the cache when we add credits in hundreds. For instance, if the cache already has a lot of messages and we add a few hundred credits after each message is consumed? `what's the theoretical max number of messages in the cache` - It depends on the operators that the user applied in the chain in their application side, different operators can have their own queue, holding messages to be delivered for their immediate subscriber. Our processor also has its own queue (say q1), when we receive a "request" for messages from the downstream, the credit will be computed using the size of q1 and "request"ed amount, that's the back-pressure aware flow, MAX_VALUE is the no-back-pressure flow.
private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long request = REQUESTED.get(this); final int credits; if (subscriber == null || request == 0) { credits = 0; } else if (request == Long.MAX_VALUE) { credits = prefetch; } else { final int remaining = Long.valueOf(request).intValue() - messageQueue.size(); credits = Math.max(remaining, 0); } return credits; }
credits = prefetch;
private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long request = REQUESTED.get(this); final int credits; if (subscriber == null || request == 0) { credits = 0; } else if (request == Long.MAX_VALUE) { credits = prefetch; } else { final int remaining = Long.valueOf(request).intValue() - messageQueue.size(); credits = Math.max(remaining, 0); } return credits; }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkHasNoCredits = new AtomicBoolean(); private final Object creditsAdded = new Object(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final String entityPath; private final Disposable parentConnection; private final int maxQueueSize; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile String currentLinkName; private volatile Disposable currentLinkSubscriptions; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(String entityPath, int prefetch, Disposable parentConnection) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; this.maxQueueSize = prefetch * 2; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); logger.info("linkName[{}] entityPath[{}]. 
Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; currentLinkName = next.getLinkName(); next.setEmptyCreditListener(() -> { final int credits; synchronized (creditsAdded) { credits = getCreditsToAdd(); if (credits < 1) { linkHasNoCredits.compareAndSet(false, true); } else { logger.info("linkName[{}] entityPath[{}] credits[{}] Link is empty. Adding more credits.", linkName, entityPath, credits); } } return credits; }); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().filter(e -> e == AmqpEndpointState.ACTIVE).next() .flatMap(state -> { final Mono<Void> operation; synchronized (creditsAdded) { final int creditsToAdd = getCreditsToAdd(); final int total = Math.max(prefetch, creditsToAdd); logger.verbose("linkName[{}] prefetch[{}] creditsToAdd[{}] Adding initial credits.", linkName, prefetch, creditsToAdd); operation = next.addCredits(total); } return operation; }) .subscribe(noop -> { }, error -> logger.info("linkName[{}] was already closed. 
Could not add credits.", linkName)), next.getEndpointStates().subscribeOn(Schedulers.boundedElastic()).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("linkName[{}] credits[{}] is active.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("linkName[{}] entityPath[{}] trackingId[{}] Link lost signal received" + " for a link that is not current. Ignoring the error.", linkName, entityPath, errorContext.getTrackingId()); return; } } } currentLink = null; onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("linkName[{}] entityPath[{}] Terminal state reached. Disposing of link " + "processor.", linkName, entityPath); dispose(); } else { logger.info("linkName[{}] entityPath[{}] Receive link endpoint states are closed. " + "Requesting another.", linkName, entityPath); final AmqpReceiveLink existing = currentLink; currentLink = null; currentLinkName = null; disposeReceiver(existing); requestUpstream(); } }), next.receive() .onBackpressureBuffer(maxQueueSize, BufferOverflowStrategy.ERROR) .subscribe(message -> { messageQueue.add(message); drain(); })); } disposeReceiver(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } } /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", currentLinkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("linkName[{}] Error on receive link.", currentLinkName, throwable); if (isTerminated() || isCancelled) { logger.info("linkName[{}] AmqpReceiveLinkProcessor is terminated. Cannot process another error.", currentLinkName, throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("linkName[{}] Parent connection is disposed. Not reopening on error.", currentLinkName); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. 
*/ @Override public void onComplete() { logger.info("linkName[{}] Receive link completed from upstream.", currentLinkName); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { if (isTerminated.getAndSet(true)) { return; } logger.info("linkName[{}] Disposing receive link.", currentLinkName); drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); addCreditsToLink("Backpressure request from downstream. Request: " + request); drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { disposeReceiver(currentLink); currentLink = null; currentLinkName = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { drainQueue(); missed = wip.addAndGet(-missed); } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("linkName[{}] entityPath[{}] Exception occurred while handling downstream onNext " + "operation.", currentLinkName, entityPath, e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } if (numberRequested > 0L && isEmpty) { addCreditsToLink("Adding more credits in drain loop."); } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } disposeReceiver(currentLink); messageQueue.clear(); return true; } /** * Consolidates all credits calculation when checking to see if more should be added. This is invoked in * {@link * * Calculates if there are enough credits to satisfy the downstream subscriber. If there is not AND the link has no * more credits, we will add them onto the link. * * In the case that the link has some credits, but _not_ enough to satisfy the request, when the link is empty, it * will call {@link AmqpReceiveLink * * @param message Additional message for context. */ private void addCreditsToLink(String message) { synchronized (creditsAdded) { final AmqpReceiveLink link = currentLink; final int credits = getCreditsToAdd(); if (link == null) { logger.verbose("entityPath[{}] creditsToAdd[{}] There is no link to add credits to.", entityPath, credits); return; } final String linkName = link.getLinkName(); if (credits < 1) { logger.verbose("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no additional credits to add.", linkName, entityPath, credits); return; } if (linkHasNoCredits.compareAndSet(true, false)) { logger.info("linkName[{}] entityPath[{}] creditsToAdd[{}] There are no more credits on link." + " Adding more. {}", linkName, entityPath, credits, message); link.addCredits(credits).subscribe(noop -> { }, error -> { logger.info("linkName[{}] entityPath[{}] was already closed. Could not add credits.", linkName, entityPath); linkHasNoCredits.compareAndSet(false, true); }); } } } /** * Gets the number of credits to add based on {@link * If {@link * then we use the {@link * * @return The number of credits to add. 
*/ private void disposeReceiver(AmqpReceiveLink link) { if (link == null) { return; } try { ((AsyncCloseable) link).closeAsync().subscribe(); } catch (Exception error) { logger.warning("linkName[{}] entityPath[{}] Unable to dispose of link.", link.getLinkName(), link.getEntityPath(), error); } } }
This breaks runtime backwards compatibility: callers of the previous release received validation failures as error signals on the returned Mono, whereas `Objects.requireNonNull` now throws synchronously at assembly time. The failure should still be delivered through the reactive pipeline (e.g. via `monoError`).
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 *
 * <p>Validation failures are delivered as error signals on the returned {@link Mono} rather
 * than thrown synchronously, so subscribers observe them through the reactive pipeline
 * (throwing at assembly time broke runtime backward compatibility for existing callers).
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or {@code null} if unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of
 *     {@link ByteBuffer}; the {@link Mono} errors with {@link NullPointerException} if
 *     {@code data} is null, or {@link IllegalArgumentException} if {@code length} is negative.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    // Signal validation failures through the Mono instead of throwing at assembly time.
    if (data == null) {
        return Mono.error(LOGGER.logExceptionAsError(
            new NullPointerException("'content' cannot be null.")));
    }
    // 'length' is optional; guard against unboxing a null Long before comparing.
    if (length != null && length < 0) {
        return Mono.error(LOGGER.logExceptionAsError(
            new IllegalArgumentException("'length' cannot be less than 0.")));
    }
    return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}
Objects.requireNonNull(data, "'content' cannot be null.");
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 *
 * <p>Validation failures are signaled through the returned {@link Mono} via {@code monoError}.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or {@code null} if unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of
 *     {@link ByteBuffer}; the {@link Mono} errors with {@link NullPointerException} if
 *     {@code data} is null, or {@link IllegalArgumentException} if {@code length} is negative.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    // BUG FIX: 'length' is a nullable Long (the single-argument overload passes null);
    // comparing it without a null guard auto-unboxes and throws NullPointerException.
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }
    return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static final int CHUNK_SIZE = 8092; static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * <p> * If the {@code data} is null an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { return fromFlux(data, null); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * <p> * If the {@code data} is null an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * <p> * If the {@code data} is null or a zero length string an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. 
*/ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
* * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null and {@code data} is not null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null and {@code data} is not null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses {@link Path} as its data. * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, CHUNK_SIZE); } /** * Creates a {@link BinaryData} that uses {@link Path} as its data. * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. 
*/ public static BinaryData fromFile(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. 
* @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. 
* * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data))); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. 
* <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array * outside of this instance will result in the contents of this BinaryData instance being updated as well. To * safely update the byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
*</p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. 
* @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile} * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended * to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. 
*/ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. 
* @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. 
* @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. 
So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. 
The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
Yes, but I am not sure why we would want to have a BinaryData instance created from a null `Flux<ByteBuffer>`.
/**
 * Creates an instance of {@link BinaryData} backed by the given {@link Flux} of {@link ByteBuffer}.
 * <p>
 * The flux is not subscribed to here; content is only consumed when the returned {@link BinaryData} is read.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that the {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or {@code null} if the length is unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null.
 * @throws IllegalArgumentException If {@code length} is non-null and negative.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    Objects.requireNonNull(data, "'content' cannot be null.");

    // 'length' is nullable (unknown length); only validate when the caller supplied one.
    boolean hasNegativeLength = length != null && length < 0;
    if (hasNegativeLength) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'length' cannot be less than 0."));
    }

    BinaryDataContent bufferedContent = new FluxByteBufferContent(data, length);
    return Mono.just(new BinaryData(bufferedContent));
}
Objects.requireNonNull(data, "'content' cannot be null.");
/**
 * Creates an instance of {@link BinaryData} backed by the given {@link Flux} of {@link ByteBuffer}.
 * <p>
 * Argument violations are surfaced through the returned {@link Mono} (reactive convention) rather than
 * thrown synchronously.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that the {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or {@code null} if the length is unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}, or a
 * {@link Mono} that emits {@link NullPointerException} if {@code data} is null, or
 * {@link IllegalArgumentException} if {@code length} is non-null and negative.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    // BUG FIX: 'length' is a nullable Long (the single-argument overload passes null for
    // "unknown length"). The original 'length < 0' unboxed it unconditionally, throwing an
    // unintended NullPointerException for every null length. Guard the unboxing.
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }
    return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static final int CHUNK_SIZE = 8092; static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * <p> * If the {@code data} is null an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { return fromFlux(data, null); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * <p> * If the {@code data} is null an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * <p> * If the {@code data} is null or a zero length string an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. 
*/ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
* * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null and {@code data} is not null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null and {@code data} is not null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses {@link Path} as its data. * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, CHUNK_SIZE); } /** * Creates a {@link BinaryData} that uses {@link Path} as its data. * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. 
*/ public static BinaryData fromFile(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. 
* @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. 
* * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data))); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. 
* <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array * outside of this instance will result in the contents of this BinaryData instance being updated as well. To * safely update the byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
*</p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. 
* @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile} * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended * to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. 
*/ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. 
* @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. 
* @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. 
So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. 
The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
Why are you copying here?
public byte[] toBytes() { bytes.compareAndSet(null, getBytes()); byte[] data = this.bytes.get(); return Arrays.copyOf(data, data.length); }
return Arrays.copyOf(data, data.length);
public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return 
count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> 
FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } }
I wonder if there is a more performant way to do this, e.g. with a MappedByteBuffer or similar where we might lazily load into the ByteBuffer rather than do the work upfront?
public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); }
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { bytes.compareAndSet(null, getBytes()); byte[] data = this.bytes.get(); return Arrays.copyOf(data, data.length); } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } 
catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; 
} int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } }
Fixed to not copy here.
public byte[] toBytes() { bytes.compareAndSet(null, getBytes()); byte[] data = this.bytes.get(); return Arrays.copyOf(data, data.length); }
return Arrays.copyOf(data, data.length);
public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return 
count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> 
FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } }
What was this attempting to do?
private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); }
private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } }
Updated to use mapped byte buffer.
public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); }
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { bytes.compareAndSet(null, getBytes()); byte[] data = this.bytes.get(); return Arrays.copyOf(data, data.length); } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } 
catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; 
} int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } }
Should allow null data here, the underlying serializer should determine whether this is allowed. For example, Jackson can turn `null` into JSON-null.
public static BinaryData fromObject(Object data) { Objects.requireNonNull(data, "'data' cannot be null."); return fromObject(data, SERIALIZER); }
Objects.requireNonNull(data, "'data' cannot be null.");
public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); }
/**
 * {@code BinaryData} abstracts a payload of bytes and the many sources it can come from (streams,
 * strings, byte arrays, files, reactive {@link Flux Fluxes} of {@link ByteBuffer}, and serialized
 * objects), exposing uniform conversions back out of each representation.
 */
class BinaryData {
    private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);

    // Default JSON serializer, resolved from the classpath (falls back to a Jackson-based one).
    static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);

    private final BinaryDataContent content;

    /**
     * Wraps the given content. Package-private; instances are created through the static factories.
     *
     * @param content The {@link BinaryDataContent} backing this instance.
     * @throws NullPointerException If {@code content} is null.
     */
    BinaryData(BinaryDataContent content) {
        this.content = Objects.requireNonNull(content, "'content' cannot be null.");
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on
     * the type of stream, the content may or may not be readable more than once. The stream
     * content is not cached unless it is read into a format that requires it to be fully in memory.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static BinaryData fromStream(InputStream inputStream) {
        Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
        return new BinaryData(new InputStreamContent(inputStream));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}.
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
        return Mono.fromCallable(() -> fromStream(inputStream));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
     * The source flux is subscribed to as many times as the content is read and therefore should
     * be replayable.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
        if (data == null) {
            // Error message fixed to name the actual parameter ('data', not 'content').
            return monoError(LOGGER, new NullPointerException("'data' cannot be null."));
        }
        return Mono.just(new BinaryData(new FluxByteBufferContent(data)));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
     * The source flux is subscribed to as many times as the content is read and therefore should
     * be replayable.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes, or null if unknown.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws IllegalArgumentException if the length is less than zero.
     * @throws NullPointerException if {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
        if (data == null) {
            return monoError(LOGGER, new NullPointerException("'data' cannot be null."));
        }
        // 'length' is a boxed Long; guard against null before comparing, otherwise the implicit
        // unboxing in 'length < 0' throws NullPointerException. A null length means "unknown".
        if (length != null && length < 0) {
            return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
        }
        return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link String}, converted to bytes
     * using {@link java.nio.charset.StandardCharsets#UTF_8 UTF-8}.
     *
     * @param data The {@link String} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link String}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromString(String data) {
        Objects.requireNonNull(data, "'data' cannot be null.");
        return new BinaryData(new StringContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given byte array.
     * <p>
     * The input byte array is used by reference; any external mutation of it is visible through
     * this instance. Make a copy first if that is not desired.
     *
     * @param data The byte array that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the byte array.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromBytes(byte[] data) {
        Objects.requireNonNull(data, "'data' cannot be null.");
        return new BinaryData(new ByteArrayContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the
     * default {@link JsonSerializer} resolved from the classpath.
     * <p>
     * {@code data} may be null; the serializer decides how null is represented (for example,
     * Jackson serializes a Java null as JSON null).
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data) {
        return fromObject(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the
     * default {@link JsonSerializer} resolved from the classpath.
     * <p>
     * {@code data} may be null; the serializer decides how null is represented.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data) {
        return fromObjectAsync(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the
     * passed {@link ObjectSerializer} (one of the Azure SDK implementations or your own).
     * <p>
     * {@code data} may be null; the serializer decides how null is represented.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
        Objects.requireNonNull(serializer, "'serializer' cannot be null.");
        return new BinaryData(new SerializableContent(data, serializer));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the
     * passed {@link ObjectSerializer}.
     * <p>
     * {@code data} may be null; the serializer decides how null is represented.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
        return Mono.fromCallable(() -> fromObject(data, serializer));
    }

    /**
     * Creates a {@link BinaryData} that uses the {@link Path} as its data. Existence is checked at
     * creation time; the file is not read until the content is accessed.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     */
    public static BinaryData fromFile(Path file) {
        return fromFile(file, STREAM_READ_SIZE);
    }

    /**
     * Creates a {@link BinaryData} that uses the {@link Path file} as its data. Existence is
     * checked at creation time; the file is not read until the content is accessed.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @param chunkSize The requested size for each read of the path.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to 0.
     */
    public static BinaryData fromFile(Path file, int chunkSize) {
        Objects.requireNonNull(file, "'file' cannot be null.");
        if (chunkSize <= 0) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "'chunkSize' cannot be less than or equal to 0."));
        }
        return new BinaryData(new FileContent(file, chunkSize));
    }

    /**
     * Returns a byte array representation of this {@link BinaryData}. The returned array is a
     * reference to the underlying data; mutate a copy if the content must stay intact.
     *
     * @return A byte array representing this {@link BinaryData}.
     */
    public byte[] toBytes() {
        return content.toBytes();
    }

    /**
     * Returns a {@link String} representation of this {@link BinaryData} using the UTF-8 character
     * set. A new String instance is created on each call.
     *
     * @return A {@link String} representing this {@link BinaryData}.
     */
    @Override
    public String toString() {
        return content.toString();
    }

    /**
     * Deserializes this {@link BinaryData} into the given non-generic type using the default
     * {@link JsonSerializer}. Each call deserializes anew; avoid calling repeatedly for the same
     * target type. For generic types use {@link #toObject(TypeReference)}.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} is null.
     * @see JsonSerializer
     */
    public <T> T toObject(Class<T> clazz) {
        return toObject(TypeReference.createInstance(clazz), SERIALIZER);
    }

    /**
     * Deserializes this {@link BinaryData} into the given (possibly generic) type using the
     * default {@link JsonSerializer}. Each call deserializes anew.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     * @see JsonSerializer
     */
    public <T> T toObject(TypeReference<T> typeReference) {
        return toObject(typeReference, SERIALIZER);
    }

    /**
     * Deserializes this {@link BinaryData} into the given non-generic type using the passed
     * {@link ObjectSerializer}. Each call deserializes anew.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     * @see ObjectSerializer
     */
    public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
        return toObject(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Deserializes this {@link BinaryData} into the given (possibly generic) type using the
     * passed {@link ObjectSerializer}. Each call deserializes anew.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return An {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
     * @see ObjectSerializer
     */
    public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
        Objects.requireNonNull(typeReference, "'typeReference' cannot be null.");
        Objects.requireNonNull(serializer, "'serializer' cannot be null.");
        return content.toObject(typeReference, serializer);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into the given non-generic type using
     * the default {@link JsonSerializer}.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} is null.
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz) {
        return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into the given (possibly generic) type
     * using the default {@link JsonSerializer}.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} is null.
     * @see JsonSerializer
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
        return toObjectAsync(typeReference, SERIALIZER);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into the given non-generic type using
     * the passed {@link ObjectSerializer}.
     *
     * @param clazz The {@link Class} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     * @see ObjectSerializer
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
        return toObjectAsync(TypeReference.createInstance(clazz), serializer);
    }

    /**
     * Asynchronously deserializes this {@link BinaryData} into the given (possibly generic) type
     * using the passed {@link ObjectSerializer}.
     *
     * @param typeReference The {@link TypeReference} representing the Object's type.
     * @param serializer The {@link ObjectSerializer} used to deserialize object.
     * @param <T> Type of the deserialized Object.
     * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
     * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
     * @see ObjectSerializer
     */
    public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
        return Mono.fromCallable(() -> toObject(typeReference, serializer));
    }

    /**
     * Returns an {@link InputStream} representation of this {@link BinaryData}.
     *
     * @return An {@link InputStream} representing the {@link BinaryData}.
     */
    public InputStream toStream() {
        return content.toStream();
    }

    /**
     * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
     * Attempting to mutate the returned buffer throws a {@link ReadOnlyBufferException}.
     *
     * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
     */
    public ByteBuffer toByteBuffer() {
        return content.toByteBuffer();
    }

    /**
     * Returns the content as a flux of {@link ByteBuffer ByteBuffers}. The underlying data source
     * is not read until the {@link Flux} is subscribed to.
     *
     * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
     */
    public Flux<ByteBuffer> toFluxByteBuffer() {
        return content.toFluxByteBuffer();
    }

    /**
     * Returns the length of the content, if known. May be {@code null} when the source did not
     * specify a length or it cannot be determined without reading the whole content.
     *
     * @return the length of the content, if it is known.
     */
    public Long getLength() {
        return content.getLength();
    }
}
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data))); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. 
*/ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } if (length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array * outside of this instance will result in the contents of this BinaryData instance being updated as well. To * safely update the byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. 
*/ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. *</p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile} * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. 
*/ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended * to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. 
* @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. 
Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. 
If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. 
If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. 
So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. 
* <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
Same comment
public static Mono<BinaryData> fromObjectAsync(Object data) { Objects.requireNonNull(data, "'data' cannot be null."); return fromObjectAsync(data, SERIALIZER); }
Objects.requireNonNull(data, "'data' cannot be null.");
public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. 
* @throws NullPointerException If {@code inputStream} is null. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, should be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data))); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, should be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. 
*/ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } if (length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array * outside of this instance will result in the contents of this BinaryData instance to be updated as well. To * safely update the byte array, it is recommended to make a copy of the contents first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. 
*/ public static BinaryData fromBytes(byte[] data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. *</p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { Objects.requireNonNull(data, "'data' cannot be null."); return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. 
* <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null or {@code data} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null or {@code data} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses {@link Path} as its data. This method checks for the existence of * the file at the time of creating an instance of {@link BinaryData}. The file, however, is not read until there * is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile} * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses {@link Path file} as its data. This method checks for the existence of * the file at the time of creating an instance of {@link BinaryData}. The file, however, is not read until there * is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. 
*/ public static BinaryData fromFile(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. To safely update the byte array, it is recommended to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. 
* @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. 
Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. 
If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. 
If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. 
So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. 
* <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The * stream content is not cached if the stream is not read into a format that requires the content to be fully read * into memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return new BinaryData(new InputStreamContent(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'content' cannot be null.")); } return Mono.just(new BinaryData(new FluxByteBufferContent(data))); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. The source flux * is subscribed to as many times as the content is read. The flux, therefore, must be replayable. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. 
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'content' cannot be null."));
    }
    // 'length' is a nullable Long meaning "length unknown". Unboxing it directly in
    // 'length < 0' would throw NullPointerException for a null length, contradicting the
    // documented contract (IllegalArgumentException only for negative lengths, and null is
    // a legal "unspecified" value). Guard the null case before comparing.
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }
    return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}

/**
 * Creates an instance of {@link BinaryData} from the given {@link String}.
 * <p>
 * The {@link String} is converted into bytes using the UTF-8 character set.
 * </p>
 * <p><strong>Create an instance from a String</strong></p>
 *
 * {@codesnippet com.azure.core.util.BinaryData.fromString}
 *
 * @param data The {@link String} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link String}.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromString(String data) {
    return new BinaryData(new StringContent(data));
}

/**
 * Creates an instance of {@link BinaryData} from the given byte array.
 * <p>
 * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input
 * byte array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array
 * outside of this instance will result in the contents of this BinaryData instance being updated as well. To
 * safely update the byte array without impacting the BinaryData instance, perform an array copy first.
 * </p>
 * <p>
 * NOTE(review): the "null ... an empty BinaryData will be returned" sentence above contradicts the
 * {@code @throws NullPointerException} below (ByteArrayContent rejects null) — confirm the intended contract.
 * </p>
 *
 * <p><strong>Create an instance from a byte array</strong></p>
 *
 * {@codesnippet com.azure.core.util.BinaryData.fromBytes}
 *
 * @param data The byte array that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the byte array.
 * @throws NullPointerException If {@code data} is null.
*/ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. *</p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @see JsonSerializer */ /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile} * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus {@code * length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. 
*/ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the * underlying byte array. Modifying the contents of the returned byte array will also change the content of this * BinaryData instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended * to make a copy of the contents first. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. 
* @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. 
Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. 
If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. 
If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of * type {@code T} is returned. 
So, calling this method repeatedly to convert the underlying data source into the * same type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. 
* <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * {@codesnippet com.azure.util.BinaryData.toByteBuffer} * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The * content is not read from the underlying data source until the {@link Flux} is subscribed to. * * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not * specify the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } }
Since the non-null check on the byte array is performed here, should the duplicate check in `BinaryData` be removed?
// Creates a new ByteArrayContent over the caller's array. The array is stored by reference
// (no defensive copy), so external mutation of it is visible through this content — matching
// the documented BinaryData.fromBytes contract. Throws NullPointerException if 'content' is null.
public ByteArrayContent(byte[] content) { this.content = Objects.requireNonNull(content, "'content' cannot be null"); }
this.content = Objects.requireNonNull(content, "'content' cannot be null");
public ByteArrayContent(byte[] content) {
    // Reject null eagerly with the same exception type and message Objects.requireNonNull
    // would produce; the array itself is stored by reference, without a defensive copy.
    if (content == null) {
        throw new NullPointerException("'content' cannot be null");
    }
    this.content = content;
}
class ByteArrayContent extends BinaryDataContent { private final byte[] content; /** * Creates a new instance of {@link ByteArrayContent}. * * @param content The byte array content. * @throws NullPointerException if {@code content} is null. */ @Override public Long getLength() { return (long) this.content.length; } @Override public String toString() { return new String(content, StandardCharsets.UTF_8); } @Override public byte[] toBytes() { return content; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(this.content, typeReference); } @Override public InputStream toStream() { return new ByteArrayInputStream(this.content); } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(this.content).asReadOnlyBuffer(); } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes()))); } }
class ByteArrayContent extends BinaryDataContent { private final byte[] content; /** * Creates a new instance of {@link ByteArrayContent}. * * @param content The byte array content. * @throws NullPointerException if {@code content} is null. */ @Override public Long getLength() { return (long) this.content.length; } @Override public String toString() { return new String(content, StandardCharsets.UTF_8); } @Override public byte[] toBytes() { return content; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(this.content, typeReference); } @Override public InputStream toStream() { return new ByteArrayInputStream(this.content); } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(this.content).asReadOnlyBuffer(); } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes()).asReadOnlyBuffer())); } }
Should this emit read-only buffers as well, for consistency with `toByteBuffer()`?
public Flux<ByteBuffer> toFluxByteBuffer() {
    // Defer so the array is wrapped lazily, per subscription, and emit a read-only view so
    // subscribers cannot mutate the backing byte array through the emitted buffer —
    // consistent with toByteBuffer(), which also returns a read-only buffer.
    return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes()).asReadOnlyBuffer()));
}
return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes())));
// Emits the content as a single ByteBuffer. Flux.defer wraps the array lazily, per
// subscription, and asReadOnlyBuffer() prevents subscribers from mutating the backing
// byte array — consistent with toByteBuffer(), which is also read-only.
public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes()).asReadOnlyBuffer())); }
class ByteArrayContent extends BinaryDataContent { private final byte[] content; /** * Creates a new instance of {@link ByteArrayContent}. * * @param content The byte array content. * @throws NullPointerException if {@code content} is null. */ public ByteArrayContent(byte[] content) { this.content = Objects.requireNonNull(content, "'content' cannot be null"); } @Override public Long getLength() { return (long) this.content.length; } @Override public String toString() { return new String(content, StandardCharsets.UTF_8); } @Override public byte[] toBytes() { return content; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(this.content, typeReference); } @Override public InputStream toStream() { return new ByteArrayInputStream(this.content); } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(this.content).asReadOnlyBuffer(); } @Override }
class ByteArrayContent extends BinaryDataContent { private final byte[] content; /** * Creates a new instance of {@link ByteArrayContent}. * * @param content The byte array content. * @throws NullPointerException if {@code content} is null. */ public ByteArrayContent(byte[] content) { this.content = Objects.requireNonNull(content, "'content' cannot be null"); } @Override public Long getLength() { return (long) this.content.length; } @Override public String toString() { return new String(content, StandardCharsets.UTF_8); } @Override public byte[] toBytes() { return content; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(this.content, typeReference); } @Override public InputStream toStream() { return new ByteArrayInputStream(this.content); } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(this.content).asReadOnlyBuffer(); } @Override }
Same comment as above about removing the duplicate guards in `BinaryData`, since the null and argument checks are performed here.
public FileContent(Path file, int chunkSize) {
    // Validate every argument before touching instance state so the constructor fails fast
    // and state assignment is not interleaved with validation.
    Objects.requireNonNull(file, "'file' cannot be null.");
    if (chunkSize <= 0) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'chunkSize' cannot be less than or equal to 0."));
    }
    if (!file.toFile().exists()) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(
            new FileNotFoundException("File does not exist " + file)));
    }
    this.file = file;
    this.chunkSize = chunkSize;
    // NOTE(review): the length is snapshotted once at construction; if the file grows or
    // shrinks afterwards, later reads may truncate or fail — confirm this is intended.
    this.length = file.toFile().length();
}
}
// Creates a FileContent over 'file', reading in chunks of 'chunkSize' bytes. Validates the
// arguments (non-null path, positive chunk size) and that the file exists at construction
// time; the file length is snapshotted here, and the file itself is not read until the
// content is consumed. Throws NullPointerException, IllegalArgumentException, or
// UncheckedIOException (file missing) accordingly.
public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), 
channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), 
channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } }
Not a fan of using `block`, could this instead be implemented with Files.readAllBytes? https://docs.oracle.com/javase/8/docs/api/java/nio/file/Files.html#readAllBytes-java.nio.file.Path-
private byte[] getBytes() { return FluxUtil.collectBytesInByteBufferStream(toFluxByteBuffer()) .share() .block(); }
.block();
private byte[] getBytes() { try { return Files.readAllBytes(file); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } }
class FileContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(FileContent.class); private final Path file; private final int chunkSize; private final long length; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link FileContent}. * * @param file The {@link Path} content. * @param chunkSize The requested size for each read of the path. * @throws NullPointerException if {@code file} is null. * @throws IllegalArgumentException if {@code chunkSize} is less than or equal to zero. */ public FileContent(Path file, int chunkSize) { Objects.requireNonNull(file, "'file' cannot be null."); if (chunkSize <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'chunkSize' cannot be less than or equal to 0.")); } this.file = file; this.chunkSize = chunkSize; if (!file.toFile().exists()) { throw LOGGER.logExceptionAsError(new UncheckedIOException( new FileNotFoundException("File does not exist " + file))); } this.length = file.toFile().length(); } @Override public Long getLength() { return this.length; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserialize(toStream(), typeReference); } @Override public InputStream toStream() { try { return new BufferedInputStream(new FileInputStream(file.toFile()), chunkSize); } catch (FileNotFoundException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException("File not found " + file, e)); } } @Override public ByteBuffer toByteBuffer() { try { FileChannel fileChannel = FileChannel.open(file); return fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, length); } catch (IOException exception) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.using(() -> FileChannel.open(file), channel -> Flux.generate(() -> 0, (count, sink) -> { if (count == length) { sink.complete(); return count; } int readCount = (int) Math.min(chunkSize, length - count); try { sink.next(channel.map(FileChannel.MapMode.READ_ONLY, count, readCount)); } catch (IOException ex) { sink.error(ex); } return count + readCount; }), channel -> { try { channel.close(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(Exceptions.propagate(ex)); } }); } }
Any reason the FluxUtil implementation isn't being used?
public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.generate(() -> 0, (count, sink) -> { byte[] data = new byte[STREAM_READ_SIZE]; try { int read = this.content.read(data, 0, data.length); if (read == -1) { sink.complete(); } else { sink.next(ByteBuffer.wrap(data, 0, read)); } } catch (IOException ex) { sink.error(ex); } return 0; }); }
return Flux.generate(() -> 0, (count, sink) -> {
public Flux<ByteBuffer> toFluxByteBuffer() { return FluxUtil.toFluxByteBuffer(this.content, STREAM_READ_SIZE); }
class InputStreamContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class); private final InputStream content; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates an instance of {@link InputStreamContent}. * * @param inputStream The inputStream that is used as the content for this instance. * @throws NullPointerException if {@code content} is null. */ public InputStreamContent(InputStream inputStream) { this.content = Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); } @Override public Long getLength() { if (bytes.get() != null) { return (long) bytes.get().length; } return null; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(toBytes(), typeReference); } @Override public InputStream toStream() { return this.content; } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); } @Override private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = this.content.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } }
class InputStreamContent extends BinaryDataContent { private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class); private final InputStream content; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates an instance of {@link InputStreamContent}. * * @param inputStream The inputStream that is used as the content for this instance. * @throws NullPointerException if {@code content} is null. */ public InputStreamContent(InputStream inputStream) { this.content = Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); } @Override public Long getLength() { if (bytes.get() != null) { return (long) bytes.get().length; } return null; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(toBytes(), typeReference); } @Override public InputStream toStream() { return this.content; } @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); } @Override private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = this.content.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } }
Oversight?
public InputStream toStream() { return null; }
return null;
public InputStream toStream() { return new ByteArrayInputStream(getBytes()); }
class SerializableContent extends BinaryDataContent { private final Object content; private final ObjectSerializer serializer; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link SerializableContent}. * @param content The serializable object that forms the content of this instance. * @param serializer The serializer that serializes the {@code content}. * @throws NullPointerException if {@code content} or {@code serializer} is null. */ public SerializableContent(Object content, ObjectSerializer serializer) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); } @Override public Long getLength() { return null; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { if (content == null) { return null; } return serializer.deserializeFromBytes(toBytes(), typeReference); } @Override @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes()))); } private byte[] getBytes() { return serializer.serializeToBytes(content); } }
class SerializableContent extends BinaryDataContent { private final Object content; private final ObjectSerializer serializer; private final AtomicReference<byte[]> bytes = new AtomicReference<>(); /** * Creates a new instance of {@link SerializableContent}. * @param content The serializable object that forms the content of this instance. * @param serializer The serializer that serializes the {@code content}. * @throws NullPointerException if {@code serializer} is null. */ public SerializableContent(Object content, ObjectSerializer serializer) { this.content = content; this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); } @Override public Long getLength() { return null; } @Override public String toString() { return new String(toBytes(), StandardCharsets.UTF_8); } @Override public byte[] toBytes() { byte[] data = this.bytes.get(); if (data == null) { bytes.set(getBytes()); data = this.bytes.get(); } return data; } @Override public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { return serializer.deserializeFromBytes(toBytes(), typeReference); } @Override @Override public ByteBuffer toByteBuffer() { return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer(); } @Override public Flux<ByteBuffer> toFluxByteBuffer() { return Flux.defer(() -> Flux.just(ByteBuffer.wrap(toBytes()))); } private byte[] getBytes() { return serializer.serializeToBytes(content); } }
the return type of this method `com.azure.messaging.servicebus.ServiceBusMessage#getApplicationProperties` is a `Map<String,Object>`, why don't we just pass in the object value type ?
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Set<String> copyHeaders = new HashSet<String>(); headers.forEach((key, value) -> { copyHeaders.add(key); }); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(message::setMessageId); Optional.of(TIME_TO_LIVE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Duration.class)) .ifPresent(message::setTimeToLive); Optional.of(SCHEDULED_ENQUEUE_TIME) .filter(copyHeaders::remove) .map(key -> headers.get(key, Instant.class)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach(key -> { message.getApplicationProperties().put(key, headers.get(key).toString()); }); }
message.getApplicationProperties().put(key, headers.get(key).toString());
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { if (!headers.containsKey(key)) { headers.put(key, value); } }); return Collections.unmodifiableMap(headers); } private 
Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Set<String> copyHeaders, String key) { return Optional.of(key) .filter(copyHeaders::remove) .map(springMessageHeaders::get) .map(Object::toString) .filter(StringUtils::hasText); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
headers.keySet()
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Set<String> copyHeaders = new HashSet<String>(); headers.forEach((key, value) -> { copyHeaders.add(key); }); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(message::setMessageId); Optional.of(TIME_TO_LIVE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Duration.class)) .ifPresent(message::setTimeToLive); Optional.of(SCHEDULED_ENQUEUE_TIME) .filter(copyHeaders::remove) .map(key -> headers.get(key, Instant.class)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach(key -> { message.getApplicationProperties().put(key, headers.get(key).toString()); }); }
});
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { if (!headers.containsKey(key)) { headers.put(key, value); } }); return Collections.unmodifiableMap(headers); } private 
Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Set<String> copyHeaders, String key) { return Optional.of(key) .filter(copyHeaders::remove) .map(springMessageHeaders::get) .map(Object::toString) .filter(StringUtils::hasText); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
really weird to see this kind of usage of Optional; please stop it
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Set<String> copyHeaders = new HashSet<String>(); headers.forEach((key, value) -> { copyHeaders.add(key); }); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(message::setMessageId); Optional.of(TIME_TO_LIVE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Duration.class)) .ifPresent(message::setTimeToLive); Optional.of(SCHEDULED_ENQUEUE_TIME) .filter(copyHeaders::remove) .map(key -> headers.get(key, Instant.class)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach(key -> { message.getApplicationProperties().put(key, headers.get(key).toString()); }); }
Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE)
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { if (!headers.containsKey(key)) { headers.put(key, value); } }); return Collections.unmodifiableMap(headers); } private 
Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Set<String> copyHeaders, String key) { return Optional.of(key) .filter(copyHeaders::remove) .map(springMessageHeaders::get) .map(Object::toString) .filter(StringUtils::hasText); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
Converting the values to String is meant to avoid non-built-in types that cannot be encoded when sending AMQP messages. This is actually not a good solution but a bug, I think, because once a non-built-in object is converted to a String it cannot be restored. We could mark it and investigate how to resolve it.
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Set<String> copyHeaders = new HashSet<String>(); headers.forEach((key, value) -> { copyHeaders.add(key); }); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(message::setMessageId); Optional.of(TIME_TO_LIVE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Duration.class)) .ifPresent(message::setTimeToLive); Optional.of(SCHEDULED_ENQUEUE_TIME) .filter(copyHeaders::remove) .map(key -> headers.get(key, Instant.class)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach(key -> { message.getApplicationProperties().put(key, headers.get(key).toString()); }); }
message.getApplicationProperties().put(key, headers.get(key).toString());
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { if (!headers.containsKey(key)) { headers.put(key, value); } }); return Collections.unmodifiableMap(headers); } private 
Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Set<String> copyHeaders, String key) { return Optional.of(key) .filter(copyHeaders::remove) .map(springMessageHeaders::get) .map(Object::toString) .filter(StringUtils::hasText); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
The keySet method of org.springframework.messaging.MessageHeaders returns an unmodifiable Set. I have updated copyHeaders to be a map and used the putAll API.
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Set<String> copyHeaders = new HashSet<String>(); headers.forEach((key, value) -> { copyHeaders.add(key); }); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(message::setMessageId); Optional.of(TIME_TO_LIVE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Duration.class)) .ifPresent(message::setTimeToLive); Optional.of(SCHEDULED_ENQUEUE_TIME) .filter(copyHeaders::remove) .map(key -> headers.get(key, Instant.class)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach(key -> { message.getApplicationProperties().put(key, headers.get(key).toString()); }); }
});
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { if (!headers.containsKey(key)) { headers.put(key, value); } }); return Collections.unmodifiableMap(headers); } private 
Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Set<String> copyHeaders, String key) { return Optional.of(key) .filter(copyHeaders::remove) .map(springMessageHeaders::get) .map(Object::toString) .filter(StringUtils::hasText); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
Roll back the original code with the right usage of Optional, and add new operations in the `ifPresent` step.
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Set<String> copyHeaders = new HashSet<String>(); headers.forEach((key, value) -> { copyHeaders.add(key); }); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(message::setMessageId); Optional.of(TIME_TO_LIVE) .filter(copyHeaders::remove) .map(key -> headers.get(key, Duration.class)) .ifPresent(message::setTimeToLive); Optional.of(SCHEDULED_ENQUEUE_TIME) .filter(copyHeaders::remove) .map(key -> headers.get(key, Instant.class)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach(key -> { message.getApplicationProperties().put(key, headers.get(key).toString()); }); }
Optional.of(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE)
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { if (!headers.containsKey(key)) { headers.put(key, value); } }); return Collections.unmodifiableMap(headers); } private 
Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Set<String> copyHeaders, String key) { return Optional.of(key) .filter(copyHeaders::remove) .map(springMessageHeaders::get) .map(Object::toString) .filter(StringUtils::hasText); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
We'd better use another method (name) for this, getStringHeader doesn't fit anymore.
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copyHeaders = new HashMap<String, Object>(); copyHeaders.putAll(headers); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); copyHeaders.remove(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE); }); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); Optional.ofNullable(headers.get(TIME_TO_LIVE, Duration.class)).ifPresent(val -> { message.setTimeToLive(val); copyHeaders.remove(TIME_TO_LIVE); }); Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); copyHeaders.remove(SCHEDULED_ENQUEUE_TIME); }); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, 
REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copySpringMessageHeaders = new HashMap<String, Object>(); copySpringMessageHeaders.putAll(headers); getAndRemove(copySpringMessageHeaders, MessageHeaders.ID, UUID.class) .ifPresent(val -> message.setMessageId(val.toString())); getAndRemove(copySpringMessageHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getAndRemove(copySpringMessageHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getAndRemove(copySpringMessageHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); getAndRemove(copySpringMessageHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(message::setScheduledEnqueueTime); getAndRemove(copySpringMessageHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); getAndRemove(copySpringMessageHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive); getAndRemove(copySpringMessageHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); }); getAndRemove(copySpringMessageHeaders, SESSION_ID).ifPresent(message::setSessionId); getAndRemove(copySpringMessageHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getAndRemove(copySpringMessageHeaders, TO).ifPresent(message::setTo); getAndRemove(copySpringMessageHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); 
getAndRemove(copySpringMessageHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copySpringMessageHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get the value of a header key from {@link MessageHeaders} as {@link String}, and if the value exists, remove the * header from a copy {@link Set} of original {@link MessageHeaders}. * * @param springMessageHeaders Original {@link MessageHeaders} to get header values. * @param copyHeaders A copy of keys for the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Map<String, Object> copyHeaders, String key) { copyHeaders.remove(key); return Optional.ofNullable(springMessageHeaders.get(key)).map(Object::toString).filter(StringUtils::hasText); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
We should come up with a more descriptive name here.
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) { Map<String, Object> copyHeaders = new HashMap<String, Object>(); copyHeaders.putAll(headers); getStringHeader(headers, copyHeaders, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, copyHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, copyHeaders, AzureHeaders.RAW_ID).ifPresent(val -> { message.setMessageId(val); logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers); }); Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); copyHeaders.remove(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE); }); getStringHeader(headers, copyHeaders, MESSAGE_ID).ifPresent(val -> { message.setMessageId(val); if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) { logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers); } }); Optional.ofNullable(headers.get(TIME_TO_LIVE, Duration.class)).ifPresent(val -> { message.setTimeToLive(val); copyHeaders.remove(TIME_TO_LIVE); }); Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME)) .map((ins) -> OffsetDateTime.ofInstant(ins, ZoneId.systemDefault())) .ifPresent(val -> { message.setScheduledEnqueueTime(val); logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers); copyHeaders.remove(SCHEDULED_ENQUEUE_TIME); }); getStringHeader(headers, copyHeaders, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, copyHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, copyHeaders, TO).ifPresent(message::setTo); getStringHeader(headers, copyHeaders, 
REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, copyHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey); copyHeaders.forEach((key, value) -> { message.getApplicationProperties().put(key, value.toString()); }); }
Map<String, Object> copyHeaders = new HashMap<String, Object>();
/**
 * Maps Spring {@link MessageHeaders} onto the corresponding {@link ServiceBusMessage}
 * properties; any header not consumed by a well-known property is copied into the
 * message's application properties as a String.
 */
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) {
    // Mutable copy of the Spring headers; each header consumed by a well-known Service Bus
    // property is removed from it, and the leftovers become application properties.
    Map<String, Object> pendingHeaders = new HashMap<>(headers);

    // Spring-defined headers.
    getAndRemove(pendingHeaders, MessageHeaders.ID, UUID.class)
        .ifPresent(id -> message.setMessageId(id.toString()));
    getAndRemove(pendingHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType);
    getAndRemove(pendingHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);

    // Azure-defined headers; RAW_ID wins over the Spring ID header.
    getAndRemove(pendingHeaders, AzureHeaders.RAW_ID).ifPresent(rawId -> {
        message.setMessageId(rawId);
        logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers);
    });
    getAndRemove(pendingHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)
        .map(Duration::ofMillis)
        .map(Instant.now()::plus)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(message::setScheduledEnqueueTime);

    // Service-Bus-specific headers; MESSAGE_ID wins over both ID headers above.
    getAndRemove(pendingHeaders, MESSAGE_ID).ifPresent(messageId -> {
        message.setMessageId(messageId);
        if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) {
            logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers);
        }
    });
    getAndRemove(pendingHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive);
    getAndRemove(pendingHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers);
        });
    getAndRemove(pendingHeaders, SESSION_ID).ifPresent(message::setSessionId);
    getAndRemove(pendingHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId);
    getAndRemove(pendingHeaders, TO).ifPresent(message::setTo);
    getAndRemove(pendingHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId);
    getAndRemove(pendingHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey);

    // Everything not consumed above becomes a String application property.
    pendingHeaders.forEach((key, value) -> message.getApplicationProperties().put(key, value.toString()));
}
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get the value of a header key from {@link MessageHeaders} as {@link String}, and if the value exists, remove the * header from a copy {@link Set} of original {@link MessageHeaders}. * * @param springMessageHeaders Original {@link MessageHeaders} to get header values. * @param copyHeaders A copy of keys for the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Map<String, Object> copyHeaders, String key) { copyHeaders.remove(key); return Optional.ofNullable(springMessageHeaders.get(key)).map(Object::toString).filter(StringUtils::hasText); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
I have renamed it to getHeaderAsStringAndRemove now — what do you think?
/**
 * Maps Spring {@link MessageHeaders} onto the corresponding {@link ServiceBusMessage}
 * properties; any header not consumed by a well-known property is copied into the
 * message's application properties as a String.
 */
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) {
    // Mutable copy of the Spring headers; each header mapped onto a well-known Service Bus
    // property is removed from it, and whatever remains becomes an application property.
    Map<String, Object> remainingHeaders = new HashMap<>(headers);

    // Spring-defined headers.
    getStringHeader(headers, remainingHeaders, MessageHeaders.ID).ifPresent(message::setMessageId);
    getStringHeader(headers, remainingHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType);
    getStringHeader(headers, remainingHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);

    // Azure-defined headers; RAW_ID wins over the Spring ID header.
    getStringHeader(headers, remainingHeaders, AzureHeaders.RAW_ID).ifPresent(rawId -> {
        message.setMessageId(rawId);
        logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers);
    });
    Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class))
        .map(Duration::ofMillis)
        .map(Instant.now()::plus)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            remainingHeaders.remove(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE);
        });

    // Service-Bus-specific headers; MESSAGE_ID wins over both ID headers above.
    getStringHeader(headers, remainingHeaders, MESSAGE_ID).ifPresent(messageId -> {
        message.setMessageId(messageId);
        if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) {
            logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers);
        }
    });
    Optional.ofNullable(headers.get(TIME_TO_LIVE, Duration.class)).ifPresent(ttl -> {
        message.setTimeToLive(ttl);
        remainingHeaders.remove(TIME_TO_LIVE);
    });
    // NOTE(review): assumes the SCHEDULED_ENQUEUE_TIME header holds an Instant — a value of any
    // other type would throw ClassCastException here; confirm against producers of this header.
    Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME))
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers);
            remainingHeaders.remove(SCHEDULED_ENQUEUE_TIME);
        });
    getStringHeader(headers, remainingHeaders, SESSION_ID).ifPresent(message::setSessionId);
    getStringHeader(headers, remainingHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId);
    getStringHeader(headers, remainingHeaders, TO).ifPresent(message::setTo);
    getStringHeader(headers, remainingHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId);
    getStringHeader(headers, remainingHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey);

    // Everything not consumed above becomes a String application property.
    remainingHeaders.forEach((key, value) -> message.getApplicationProperties().put(key, value.toString()));
}
getStringHeader(headers, copyHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);
/**
 * Maps Spring {@link MessageHeaders} onto the corresponding {@link ServiceBusMessage}
 * properties; any header not consumed by a well-known property is copied into the
 * message's application properties as a String.
 */
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) {
    // Mutable copy of the Spring headers; each header consumed by a well-known Service Bus
    // property is removed from it, and the leftovers become application properties.
    Map<String, Object> pendingHeaders = new HashMap<>(headers);

    // Spring-defined headers.
    getAndRemove(pendingHeaders, MessageHeaders.ID, UUID.class)
        .ifPresent(id -> message.setMessageId(id.toString()));
    getAndRemove(pendingHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType);
    getAndRemove(pendingHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);

    // Azure-defined headers; RAW_ID wins over the Spring ID header.
    getAndRemove(pendingHeaders, AzureHeaders.RAW_ID).ifPresent(rawId -> {
        message.setMessageId(rawId);
        logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers);
    });
    getAndRemove(pendingHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)
        .map(Duration::ofMillis)
        .map(Instant.now()::plus)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(message::setScheduledEnqueueTime);

    // Service-Bus-specific headers; MESSAGE_ID wins over both ID headers above.
    getAndRemove(pendingHeaders, MESSAGE_ID).ifPresent(messageId -> {
        message.setMessageId(messageId);
        if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) {
            logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers);
        }
    });
    getAndRemove(pendingHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive);
    getAndRemove(pendingHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers);
        });
    getAndRemove(pendingHeaders, SESSION_ID).ifPresent(message::setSessionId);
    getAndRemove(pendingHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId);
    getAndRemove(pendingHeaders, TO).ifPresent(message::setTo);
    getAndRemove(pendingHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId);
    getAndRemove(pendingHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey);

    // Everything not consumed above becomes a String application property.
    pendingHeaders.forEach((key, value) -> message.getApplicationProperties().put(key, value.toString()));
}
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get the value of a header key from {@link MessageHeaders} as {@link String}, and if the value exists, remove the * header from a copy {@link Set} of original {@link MessageHeaders}. * * @param springMessageHeaders Original {@link MessageHeaders} to get header values. * @param copyHeaders A copy of keys for the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Map<String, Object> copyHeaders, String key) { copyHeaders.remove(key); return Optional.ofNullable(springMessageHeaders.get(key)).map(Object::toString).filter(StringUtils::hasText); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
I have changed it to headersForApplicationProperties — is that OK?
/**
 * Maps Spring {@link MessageHeaders} onto the corresponding {@link ServiceBusMessage}
 * properties; any header not consumed by a well-known property is copied into the
 * message's application properties as a String.
 */
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) {
    // Mutable copy of the Spring headers; each header mapped onto a well-known Service Bus
    // property is removed from it, and whatever remains becomes an application property.
    Map<String, Object> remainingHeaders = new HashMap<>(headers);

    // Spring-defined headers.
    getStringHeader(headers, remainingHeaders, MessageHeaders.ID).ifPresent(message::setMessageId);
    getStringHeader(headers, remainingHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType);
    getStringHeader(headers, remainingHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);

    // Azure-defined headers; RAW_ID wins over the Spring ID header.
    getStringHeader(headers, remainingHeaders, AzureHeaders.RAW_ID).ifPresent(rawId -> {
        message.setMessageId(rawId);
        logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers);
    });
    Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class))
        .map(Duration::ofMillis)
        .map(Instant.now()::plus)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            remainingHeaders.remove(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE);
        });

    // Service-Bus-specific headers; MESSAGE_ID wins over both ID headers above.
    getStringHeader(headers, remainingHeaders, MESSAGE_ID).ifPresent(messageId -> {
        message.setMessageId(messageId);
        if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) {
            logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers);
        }
    });
    Optional.ofNullable(headers.get(TIME_TO_LIVE, Duration.class)).ifPresent(ttl -> {
        message.setTimeToLive(ttl);
        remainingHeaders.remove(TIME_TO_LIVE);
    });
    // NOTE(review): assumes the SCHEDULED_ENQUEUE_TIME header holds an Instant — a value of any
    // other type would throw ClassCastException here; confirm against producers of this header.
    Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME))
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers);
            remainingHeaders.remove(SCHEDULED_ENQUEUE_TIME);
        });
    getStringHeader(headers, remainingHeaders, SESSION_ID).ifPresent(message::setSessionId);
    getStringHeader(headers, remainingHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId);
    getStringHeader(headers, remainingHeaders, TO).ifPresent(message::setTo);
    getStringHeader(headers, remainingHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId);
    getStringHeader(headers, remainingHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey);

    // Everything not consumed above becomes a String application property.
    remainingHeaders.forEach((key, value) -> message.getApplicationProperties().put(key, value.toString()));
}
Map<String, Object> copyHeaders = new HashMap<String, Object>();
/**
 * Maps Spring {@link MessageHeaders} onto the corresponding {@link ServiceBusMessage}
 * properties; any header not consumed by a well-known property is copied into the
 * message's application properties as a String.
 */
protected void setCustomHeaders(MessageHeaders headers, ServiceBusMessage message) {
    // Mutable copy of the Spring headers; each header consumed by a well-known Service Bus
    // property is removed from it, and the leftovers become application properties.
    Map<String, Object> pendingHeaders = new HashMap<>(headers);

    // Spring-defined headers.
    getAndRemove(pendingHeaders, MessageHeaders.ID, UUID.class)
        .ifPresent(id -> message.setMessageId(id.toString()));
    getAndRemove(pendingHeaders, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType);
    getAndRemove(pendingHeaders, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo);

    // Azure-defined headers; RAW_ID wins over the Spring ID header.
    getAndRemove(pendingHeaders, AzureHeaders.RAW_ID).ifPresent(rawId -> {
        message.setMessageId(rawId);
        logOverriddenHeaders(AzureHeaders.RAW_ID, MessageHeaders.ID, headers);
    });
    getAndRemove(pendingHeaders, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)
        .map(Duration::ofMillis)
        .map(Instant.now()::plus)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(message::setScheduledEnqueueTime);

    // Service-Bus-specific headers; MESSAGE_ID wins over both ID headers above.
    getAndRemove(pendingHeaders, MESSAGE_ID).ifPresent(messageId -> {
        message.setMessageId(messageId);
        if (!logOverriddenHeaders(MESSAGE_ID, AzureHeaders.RAW_ID, headers)) {
            logOverriddenHeaders(MESSAGE_ID, MessageHeaders.ID, headers);
        }
    });
    getAndRemove(pendingHeaders, TIME_TO_LIVE, Duration.class).ifPresent(message::setTimeToLive);
    getAndRemove(pendingHeaders, SCHEDULED_ENQUEUE_TIME, Instant.class)
        .map(instant -> OffsetDateTime.ofInstant(instant, ZoneId.systemDefault()))
        .ifPresent(enqueueTime -> {
            message.setScheduledEnqueueTime(enqueueTime);
            logOverriddenHeaders(SCHEDULED_ENQUEUE_TIME, AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, headers);
        });
    getAndRemove(pendingHeaders, SESSION_ID).ifPresent(message::setSessionId);
    getAndRemove(pendingHeaders, CORRELATION_ID).ifPresent(message::setCorrelationId);
    getAndRemove(pendingHeaders, TO).ifPresent(message::setTo);
    getAndRemove(pendingHeaders, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId);
    getAndRemove(pendingHeaders, PARTITION_KEY).ifPresent(message::setPartitionKey);

    // Everything not consumed above becomes a String application property.
    pendingHeaders.forEach((key, value) -> message.getApplicationProperties().put(key, value.toString()));
}
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get the value of a header key from {@link MessageHeaders} as {@link String}, and if the value exists, remove the * header from a copy {@link Set} of original {@link MessageHeaders}. * * @param springMessageHeaders Original {@link MessageHeaders} to get header values. * @param copyHeaders A copy of keys for the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, Map<String, Object> copyHeaders, String key) { copyHeaders.remove(key); return Optional.ofNullable(springMessageHeaders.get(key)).map(Object::toString).filter(StringUtils::hasText); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<ServiceBusReceivedMessage, ServiceBusMessage> { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(ServiceBusReceivedMessage azureMessage) { final BinaryData body = azureMessage.getBody(); return body == null ? null : body.toBytes(); } @Override protected ServiceBusMessage fromString(String payload) { return new ServiceBusMessage(payload); } @Override protected ServiceBusMessage fromByte(byte[] payload) { return new ServiceBusMessage(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(ServiceBusReceivedMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTime()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); message.getApplicationProperties().forEach((key, value) -> { headers.putIfAbsent(key, value); 
}); return Collections.unmodifiableMap(headers); } /** * Get and remove the header value as {@link String} from a copy of {@link MessageHeaders} . * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @return {@link Optional} of the header value. */ private Optional<String> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key) { return getAndRemove(copySpringMessageHeaders, key, String.class).filter(StringUtils::hasText); } /** * Get and remove the header value from a copy of {@link MessageHeaders} and convert to the target type. * * @param copySpringMessageHeaders A copy of the original {@link MessageHeaders}. * @param key The header key to get value. * @param clazz The class that the header value converts to. * @param <T> The generic type of the class. * @return {@link Optional} of the header value. */ private <T> Optional<T> getAndRemove(Map<String, Object> copySpringMessageHeaders, String key, Class<T> clazz) { return Optional.ofNullable(clazz.cast(copySpringMessageHeaders.remove(key))); } private Boolean logOverriddenHeaders(String currentHeader, String overriddenHeader, MessageHeaders springMessageHeaders) { Boolean isExisted = false; if (springMessageHeaders.containsKey(overriddenHeader)) { isExisted = true; LOGGER.warn("{} header detected, usage of {} header will be overridden", currentHeader, overriddenHeader); } return isExisted; } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value).filter(StringUtils::hasText).ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value).ifPresent(s -> map.put(key, s)); } }
We might want to catch errors/exceptions here and resume the program, since any error thrown at this stage will prevent the cleanup below from running.
/**
 * Runs a performance test end to end: prints options, constructs one test instance per parallel
 * slot, performs setup, optional record/playback, warmup, the timed iterations, then cleanup.
 *
 * <p>BUGFIX: stopping playback is now wrapped in its own try/finally so that a failure in
 * {@code stopPlaybackAsync} can no longer skip the per-test {@code cleanupAsync} that shares the
 * same finally block (see the review note above this method).
 *
 * @param testClass the test class to execute; must expose a constructor taking the options type.
 * @param options the configuration to run the performance test with.
 * @throws RuntimeException if option serialization or test instantiation fails.
 */
public static void run(Class<?> testClass, PerfStressOptions options) {
    System.out.println("=== Options ===");
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
        // Keep System.out open after writeValue; we keep printing below.
        mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
        mapper.writeValue(System.out, options);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    System.out.println();
    System.out.println();

    Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false);
    Disposable cleanupStatus = null;

    // One test instance per degree of parallelism, built via the (options) constructor.
    PerfStressTest<?>[] tests = new PerfStressTest<?>[options.getParallel()];
    for (int i = 0; i < options.getParallel(); i++) {
        try {
            tests[i] = (PerfStressTest<?>) testClass.getConstructor(options.getClass()).newInstance(options);
        } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
            | InvocationTargetException | SecurityException | NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
    }

    try {
        // Global setup runs once, on the first instance only.
        tests[0].globalSetupAsync().block();
        boolean startedPlayback = false;
        try {
            Flux.just(tests).flatMap(PerfStressTest::setupAsync).blockLast();
            setupStatus.dispose();

            if (options.getTestProxy() != null) {
                Disposable recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false);
                Flux.just(tests).flatMap(PerfStressTest::recordAndStartPlaybackAsync).blockLast();
                startedPlayback = true;
                recordStatus.dispose();
            }

            if (options.getWarmup() > 0) {
                runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup");
            }

            for (int i = 0; i < options.getIterations(); i++) {
                String title = "Test";
                if (options.getIterations() > 1) {
                    title += " " + (i + 1);
                }
                runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title);
            }
        } finally {
            // Stop playback and per-test cleanup are independent teardown steps; nest them in
            // try/finally so a playback failure cannot prevent cleanup from running.
            try {
                if (startedPlayback) {
                    Disposable playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(PerfStressTest::stopPlaybackAsync).blockLast();
                    playbackStatus.dispose();
                }
            } finally {
                if (!options.isNoCleanup()) {
                    cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(t -> t.cleanupAsync()).blockLast();
                }
            }
        }
    } finally {
        // Global cleanup mirrors global setup: once, on the first instance.
        if (!options.isNoCleanup()) {
            if (cleanupStatus == null) {
                cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
            }
            tests[0].globalCleanupAsync().block();
        }
    }

    if (cleanupStatus != null) {
        cleanupStatus.dispose();
    }
}
Flux.just(tests).flatMap(PerfStressTest::stopPlaybackAsync).blockLast();
/**
 * Runs a performance test end to end: prints options, constructs one test instance per parallel
 * slot, performs setup, optional record/playback, warmup, the timed iterations, then cleanup.
 * The teardown ordering here is deliberate — do not reorder the nested finally blocks.
 *
 * @param testClass the test class to execute; must expose a constructor taking the options type.
 * @param options the configuration to run the performance test with.
 * @throws RuntimeException if option serialization or test instantiation fails.
 */
public static void run(Class<?> testClass, PerfStressOptions options) {
    System.out.println("=== Options ===");
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
        // Keep System.out open after writeValue; more output follows.
        mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
        mapper.writeValue(System.out, options);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    System.out.println();
    System.out.println();
    Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false);
    Disposable cleanupStatus = null;
    // One test instance per degree of parallelism, built via the (options) constructor.
    PerfStressTest<?>[] tests = new PerfStressTest<?>[options.getParallel()];
    for (int i = 0; i < options.getParallel(); i++) {
        try {
            tests[i] = (PerfStressTest<?>) testClass.getConstructor(options.getClass()).newInstance(options);
        } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
            | InvocationTargetException | SecurityException | NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
    }
    try {
        // Global setup runs once, on the first instance only.
        tests[0].globalSetupAsync().block();
        boolean startedPlayback = false;
        try {
            Flux.just(tests).flatMap(PerfStressTest::setupAsync).blockLast();
            setupStatus.dispose();
            if (options.getTestProxy() != null) {
                Disposable recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false);
                Flux.just(tests).flatMap(PerfStressTest::recordAndStartPlaybackAsync).blockLast();
                startedPlayback = true;
                recordStatus.dispose();
            }
            if (options.getWarmup() > 0) {
                runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup");
            }
            for (int i = 0; i < options.getIterations(); i++) {
                String title = "Test";
                if (options.getIterations() > 1) {
                    title += " " + (i + 1);
                }
                runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title);
            }
        } finally {
            // Nested try/finally: a failure while stopping playback must not skip cleanupAsync.
            try {
                if (startedPlayback) {
                    Disposable playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(PerfStressTest::stopPlaybackAsync).blockLast();
                    playbackStatus.dispose();
                }
            } finally {
                if (!options.isNoCleanup()) {
                    cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(t -> t.cleanupAsync()).blockLast();
                }
            }
        }
    } finally {
        // Global cleanup mirrors global setup: once, on the first instance.
        if (!options.isNoCleanup()) {
            if (cleanupStatus == null) {
                cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
            }
            tests[0].globalCleanupAsync().block();
        }
    }
    if (cleanupStatus != null) {
        cleanupStatus.dispose();
    }
}
class to execute. * @param options the configuration to run the performance test with. * * @throws RuntimeException if the execution fails. */
class to execute. * @param options the configuration to run the performance test with. * * @throws RuntimeException if the execution fails. */
Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>();
private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { HashMap<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<String, AppConfigurationStoreHealth>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refresh(StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; }
HashMap<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<String, AppConfigurationStoreHealth>();
private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refresh(StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final ClientStore clientStore; private HashMap<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; public AppConfigurationRefresh(AppConfigurationProperties properties, ClientStore clientStore) { this.configStores = properties.getStores(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<String, AppConfigurationStoreHealth>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); } @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. */ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } public void expireRefreshInterval(String endpoint) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. 
If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. */ /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. */ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Date date = new Date(); if (date.after(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { SettingSelector settingSelector = new SettingSelector().setKeyFilter(watchKey.getKey()) .setLabelFilter(watchKey.getLabel()); ConfigurationSetting watchedKey = clientStore.getWatchKey(settingSelector, endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(endpoint, state.getWatchKeys(), refreshInterval); } return false; } public HashMap<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || 
StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final ClientStore clientStore; private Map<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; public AppConfigurationRefresh(AppConfigurationProperties properties, ClientStore clientStore) { this.configStores = properties.getStores(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); } @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. */ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } public void expireRefreshInterval(String endpoint) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. 
If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. */ /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. */ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Date date = new Date(); if (date.after(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { SettingSelector settingSelector = new SettingSelector().setKeyFilter(watchKey.getKey()) .setLabelFilter(watchKey.getLabel()); ConfigurationSetting watchedKey = clientStore.getWatchKey(settingSelector, endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(state, refreshInterval); } return false; } public Map<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each 
refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
this.clientHealth = new HashMap<>();
public AppConfigurationRefresh(AppConfigurationProperties properties, ClientStore clientStore) { this.configStores = properties.getStores(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<String, AppConfigurationStoreHealth>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); }
this.clientHealth = new HashMap<String, AppConfigurationStoreHealth>();
public AppConfigurationRefresh(AppConfigurationProperties properties, ClientStore clientStore) { this.configStores = properties.getStores(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final ClientStore clientStore; private HashMap<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. */ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } public void expireRefreshInterval(String endpoint) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. 
*/ private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { HashMap<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<String, AppConfigurationStoreHealth>(); configStores.stream().forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refresh(StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; } /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. 
*/ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Date date = new Date(); if (date.after(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { SettingSelector settingSelector = new SettingSelector().setKeyFilter(watchKey.getKey()) .setLabelFilter(watchKey.getLabel()); ConfigurationSetting watchedKey = clientStore.getWatchKey(settingSelector, endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(endpoint, state.getWatchKeys(), refreshInterval); } return false; } public HashMap<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
/**
 * Periodically checks the configured App Configuration stores for changed watch keys and
 * publishes a Spring {@code RefreshEvent} when a change is detected. Also tracks a per-store
 * health snapshot for the actuator health indicator.
 */
class AppConfigurationRefresh implements ApplicationEventPublisherAware {

    private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class);

    // Guard ensuring only one refresh check runs at a time.
    private final AtomicBoolean running = new AtomicBoolean(false);

    private final List<ConfigStore> configStores;

    private ApplicationEventPublisher publisher;

    private final ClientStore clientStore;

    // Latest per-store health snapshot; replaced wholesale at the end of each refresh pass.
    private Map<String, AppConfigurationStoreHealth> clientHealth;

    // Key of the watch entry that most recently triggered a refresh event.
    private String eventDataInfo;

    @Override
    public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) {
        this.publisher = applicationEventPublisher;
    }

    /**
     * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a
     * trigger has been updated configuration are reloaded.
     *
     * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run
     * elsewhere this method will return right away as <b>false</b>.
     */
    @Async
    public Future<Boolean> refreshConfigurations() {
        return new AsyncResult<>(refreshStores());
    }

    // Forces the named store's refresh interval to expire so the next check re-queries it.
    public void expireRefreshInterval(String endpoint) {
        for (ConfigStore configStore : configStores) {
            if (configStore.getEndpoint().equals(endpoint)) {
                LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint());
                StateHolder.expireState(configStore.getEndpoint());
                break;
            }
        }
    }

    /**
     * Goes through each config store and checks if any of its keys need to be refreshed. If any store has a value that
     * needs to be updated a refresh event is called after every store is checked.
     *
     * @return If a refresh event is called.
     */
    private boolean refreshStores() {
        boolean didRefresh = false;
        if (running.compareAndSet(false, true)) {
            Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>();
            // Start pessimistic: loaded stores are DOWN until a check proves them UP below.
            configStores.stream().forEach(store -> {
                if (getStoreHealthState(store)) {
                    clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN);
                } else {
                    clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED);
                }
            });
            try {
                for (ConfigStore configStore : configStores) {
                    if (configStore.isEnabled()) {
                        String endpoint = configStore.getEndpoint();
                        AppConfigurationStoreMonitoring monitor = configStore.getMonitoring();
                        if (StateHolder.getLoadState(endpoint)) {
                            if (monitor.isEnabled()
                                && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) {
                                // One trigger is enough; remaining stores are checked next cycle.
                                didRefresh = true;
                                break;
                            } else {
                                LOGGER.debug("Skipping configuration refresh check for " + endpoint);
                            }
                            clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP);
                        }
                        FeatureFlagStore featureStore = configStore.getFeatureFlags();
                        if (StateHolder.getLoadStateFeatureFlag(endpoint)) {
                            if (featureStore.getEnabled() && refresh(StateHolder.getStateFeatureFlag(endpoint),
                                endpoint, monitor.getFeatureFlagRefreshInterval())) {
                                didRefresh = true;
                                break;
                            } else {
                                LOGGER.debug("Skipping feature flag refresh check for " + endpoint);
                            }
                            clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP);
                        }
                    }
                }
            } finally {
                // Always release the guard and publish the health snapshot, even on early break/throw.
                running.set(false);
                clientHealth = clientHealthUpdate;
            }
        }
        return didRefresh;
    }

    /**
     * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published.
     *
     * @param state The refresh state of the endpoint being checked.
     * @param endpoint The App Config Endpoint being checked for refresh.
     * @param refreshInterval Amount of time to wait until next check of this endpoint.
     * @return Refresh event was triggered. No other sources need to be checked.
     */
    private boolean refresh(State state, String endpoint, Duration refreshInterval) {
        Date date = new Date();
        if (date.after(state.getNextRefreshCheck())) {
            for (ConfigurationSetting watchKey : state.getWatchKeys()) {
                SettingSelector settingSelector = new SettingSelector().setKeyFilter(watchKey.getKey())
                    .setLabelFilter(watchKey.getLabel());
                ConfigurationSetting watchedKey = clientStore.getWatchKey(settingSelector, endpoint);
                String etag = null;
                if (watchedKey != null) {
                    etag = watchedKey.getETag();
                }
                LOGGER.debug(etag + " - " + watchKey.getETag());
                if (etag != null && !etag.equals(watchKey.getETag())) {
                    LOGGER.trace(
                        "Some keys in store [{}] matching the key [{}] and label [{}] is updated, "
                            + "will send refresh event.",
                        endpoint, watchKey.getKey(), watchKey.getLabel());
                    this.eventDataInfo = watchKey.getKey();
                    LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo);
                    RefreshEventData eventData = new RefreshEventData(eventDataInfo);
                    publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage()));
                    return true;
                }
            }
            // Nothing changed: push the next check out by the refresh interval.
            StateHolder.setState(state, refreshInterval);
        }
        return false;
    }

    // Returns the latest per-store health snapshot.
    public Map<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() {
        return this.clientHealth;
    }

    // A store counts as healthy-capable once it is enabled and has loaded config or feature flags.
    private Boolean getStoreHealthState(ConfigStore store) {
        return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint())
            || StateHolder.getLoadStateFeatureFlag(store.getEndpoint()));
    }

    /**
     * For each refresh, multiple etags can change, but even one etag is changed, refresh is required.
     */
    static class RefreshEventData {
        private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check.";

        private final String message;

        RefreshEventData(String prefix) {
            this.message = String.format(MSG_TEMPLATE, prefix);
        }

        public String getMessage() {
            return this.message;
        }
    }
}
Better: `AppConfigurationStoreHealth.DOWN.equals(refresh.getAppConfigurationStoresHealth().get(store))` — putting the constant first avoids a NullPointerException when the map has no entry for the store.
public Health health() { Health.Builder healthBuilder = new Health.Builder(); Boolean healthy = true; for (String store : refresh.getAppConfigurationStoresHealth().keySet()) { if (refresh.getAppConfigurationStoresHealth().get(store).equals(AppConfigurationStoreHealth.DOWN)) { healthy = false; healthBuilder.withDetail(store, "DOWN"); } else if (refresh.getAppConfigurationStoresHealth().get(store).equals(AppConfigurationStoreHealth.NOT_LOADED)) { healthBuilder.withDetail(store, "NOT LOADED"); } else { healthBuilder.withDetail(store, "UP"); } } if (!healthy) { return healthBuilder.down().build(); } return healthBuilder.up().build(); }
if (refresh.getAppConfigurationStoresHealth().get(store).equals(AppConfigurationStoreHealth.DOWN)) {
public Health health() { Health.Builder healthBuilder = new Health.Builder(); Boolean healthy = true; for (String store : refresh.getAppConfigurationStoresHealth().keySet()) { if (AppConfigurationStoreHealth.DOWN.equals(refresh.getAppConfigurationStoresHealth().get(store))) { healthy = false; healthBuilder.withDetail(store, "DOWN"); } else if (refresh.getAppConfigurationStoresHealth().get(store).equals(AppConfigurationStoreHealth.NOT_LOADED)) { healthBuilder.withDetail(store, "NOT LOADED"); } else { healthBuilder.withDetail(store, "UP"); } } if (!healthy) { return healthBuilder.down().build(); } return healthBuilder.up().build(); }
class AppConfigurationHealthIndicator implements HealthIndicator { private final AppConfigurationRefresh refresh; public AppConfigurationHealthIndicator(AppConfigurationRefresh refresh) { this.refresh = refresh; } @Override }
class AppConfigurationHealthIndicator implements HealthIndicator { private final AppConfigurationRefresh refresh; public AppConfigurationHealthIndicator(AppConfigurationRefresh refresh) { this.refresh = refresh; } @Override }
Nit: use the diamond operator — `new ArrayList<>();` — the explicit type arguments are redundant.
public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, true); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); }
List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>();
public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, true); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); 
watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); 
assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); 
monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void storeDisabled() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); store.setEnabled(false); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); 
monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock); refresh.setApplicationEventPublisher(eventPublisher); refresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.NOT_LOADED, refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, 
watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void noEtagReturned() throws Exception { 
when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); 
propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void storeDisabled() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); store.setEnabled(false); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); 
monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock); refresh.setApplicationEventPublisher(eventPublisher); refresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.NOT_LOADED, refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
Thank you :)
static void validateValues(String argName, String argValue, String ... expectedValues) { if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) { throw new IllegalArgumentException(String.format("%s must match %s", argName, expectedValues)); } }
throw new IllegalArgumentException(String.format("%s must match %s", argName, expectedValues));
static void validateValues(String argName, String argValue, String ... expectedValues) { if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) { throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues))); } }
class Utils { public static final String COMMANDLINE_INPUTFILE = "inputfile"; public static final String COMMANDLINE_OUTPUTFILE = "outputfile"; public static final String COMMANDLINE_POMFILE = "pomfile"; public static final String COMMANDLINE_MODE = "mode"; public static final String ANALYZE_MODE = "analyze"; public static final String GENERATE_MODE = "generate"; public static final String COMMANDLINE_EXTERNALDEPENDENCIES = "externalDependencies"; public static final String COMMANDLINE_GROUPID = "groupid"; public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)"); public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent"); public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)"); public static final String BASE_AZURE_GROUPID = "com.azure"; public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test"; public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf"; public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient(); public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]"); public static final String PROJECT_VERSION = "project.version"; public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList( "junit-jupiter-api" )); public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(Arrays.asList( "slf4j-api" )); public static final String POM_TYPE = "pom"; private static Logger logger = LoggerFactory.getLogger(Utils.class); static void validateNotNullOrEmpty(String argValue, String argName) { if(argValue == null || argValue.isEmpty()) { throw new NullPointerException(String.format("%s can't be null", argName)); } } static void validateNull(String argValue, String argName) { if(argValue != null) { throw new IllegalArgumentException(String.format("%s should be null", argName)); } 
} static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) { List<BomDependency> allResolvedDependencies = new ArrayList<>(); for (Dependency dependency : dependencies) { List<BomDependency> resolvedDependencies = getPomFileContent(dependency); if (resolvedDependencies != null) { allResolvedDependencies.addAll(resolvedDependencies); } } return allResolvedDependencies; } static List<BomDependency> getPomFileContent(Dependency dependency) { String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId()); String url = null; if(groups.length == 2) { url = "https: } else if (groups.length == 3) { url = "https: } else { throw new UnsupportedOperationException("Can't parse the external BOM file."); } HttpRequest request = HttpRequest.newBuilder() .uri(URI.create(url)) .GET() .header("accept", "application/xml") .timeout(Duration.ofMillis(5000)) .build(); return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream()) .thenApply(response -> { if(response.statusCode() == 200) { try (InputStreamReader reader = new InputStreamReader(response.body())) { return Utils.parsePomFileContent(reader); } catch (IOException ex) { logger.error("Failed to read contents for {}", dependency.toString()); } } return null; }).join(); } static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) { return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId()); } static List<BomDependency> parsePomFileContent(String fileName) { try (FileReader reader = new FileReader(fileName)) { return parsePomFileContent(reader); } catch (IOException exception) { logger.error("Failed to read the contents of the pom file: {}", fileName); } return new ArrayList<>(); } static List<BomDependency> parsePomFileContent(Reader responseStream) { MavenXpp3Reader reader = new MavenXpp3Reader(); try { Model model = reader.read(responseStream); DependencyManagement management = model.getDependencyManagement(); 
return management.getDependencies().stream().map(dep -> { String version = getPropertyName(dep.getVersion()); while(model.getProperties().getProperty(version) != null) { version = getPropertyName(model.getProperties().getProperty(version)); if(version.equals(PROJECT_VERSION)) { version = model.getVersion(); } } if(version == null) { version = dep.getVersion(); } BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version); return bomDependency; }).collect(Collectors.toList()); } catch (IOException exception) { exception.printStackTrace(); } catch (XmlPullParserException e) { e.printStackTrace(); } return null; } private static String getPropertyName(String propertyValue) { if(propertyValue.startsWith("${")) { return propertyValue.substring(2, propertyValue.length() - 1); } return propertyValue; } }
class Utils { public static final String COMMANDLINE_INPUTFILE = "inputfile"; public static final String COMMANDLINE_OUTPUTFILE = "outputfile"; public static final String COMMANDLINE_POMFILE = "pomfile"; public static final String COMMANDLINE_MODE = "mode"; public static final String ANALYZE_MODE = "analyze"; public static final String GENERATE_MODE = "generate"; public static final String COMMANDLINE_EXTERNALDEPENDENCIES = "externalDependencies"; public static final String COMMANDLINE_GROUPID = "groupid"; public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)"); public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent"); public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)"); public static final String BASE_AZURE_GROUPID = "com.azure"; public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test"; public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf"; public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient(); public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]"); public static final String PROJECT_VERSION = "project.version"; public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList( "junit-jupiter-api" )); public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(Arrays.asList( "slf4j-api" )); public static final String POM_TYPE = "pom"; private static Logger logger = LoggerFactory.getLogger(Utils.class); static void validateNotNullOrEmpty(String argValue, String argName) { if(argValue == null || argValue.isEmpty()) { throw new NullPointerException(String.format("%s can't be null", argName)); } } static void validateNull(String argValue, String argName) { if(argValue != null) { throw new IllegalArgumentException(String.format("%s should be null", argName)); } 
} static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) { List<BomDependency> allResolvedDependencies = new ArrayList<>(); for (Dependency dependency : dependencies) { List<BomDependency> resolvedDependencies = getPomFileContent(dependency); if (resolvedDependencies != null) { allResolvedDependencies.addAll(resolvedDependencies); } } return allResolvedDependencies; } static List<BomDependency> getPomFileContent(Dependency dependency) { String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId()); String url = null; if(groups.length == 2) { url = "https: } else if (groups.length == 3) { url = "https: } else { throw new UnsupportedOperationException("Can't parse the external BOM file."); } HttpRequest request = HttpRequest.newBuilder() .uri(URI.create(url)) .GET() .header("accept", "application/xml") .timeout(Duration.ofMillis(5000)) .build(); return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream()) .thenApply(response -> { if(response.statusCode() == 200) { try (InputStreamReader reader = new InputStreamReader(response.body())) { return Utils.parsePomFileContent(reader); } catch (IOException ex) { logger.error("Failed to read contents for {}", dependency.toString()); } } return null; }).join(); } static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) { return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId()); } static List<BomDependency> parsePomFileContent(String fileName) { try (FileReader reader = new FileReader(fileName)) { return parsePomFileContent(reader); } catch (IOException exception) { logger.error("Failed to read the contents of the pom file: {}", fileName); } return new ArrayList<>(); } static List<BomDependency> parsePomFileContent(Reader responseStream) { MavenXpp3Reader reader = new MavenXpp3Reader(); try { Model model = reader.read(responseStream); DependencyManagement management = model.getDependencyManagement(); 
return management.getDependencies().stream().map(dep -> { String version = getPropertyName(dep.getVersion()); while(model.getProperties().getProperty(version) != null) { version = getPropertyName(model.getProperties().getProperty(version)); if(version.equals(PROJECT_VERSION)) { version = model.getVersion(); } } if(version == null) { version = dep.getVersion(); } BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version); return bomDependency; }).collect(Collectors.toList()); } catch (IOException exception) { exception.printStackTrace(); } catch (XmlPullParserException e) { e.printStackTrace(); } return null; } private static String getPropertyName(String propertyValue) { if(propertyValue.startsWith("${")) { return propertyValue.substring(2, propertyValue.length() - 1); } return propertyValue; } }
same here
public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); }
List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>();
public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, 
true); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); } @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test @Test public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, 
String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. 
Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() 
throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void storeDisabled() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); store.setEnabled(false); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock); refresh.setApplicationEventPublisher(eventPublisher); refresh.refreshConfigurations().get(); verify(eventPublisher, 
times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.NOT_LOADED, refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, true); 
List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); } @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test @Test public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", 
"fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new 
AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void storeDisabled() throws Exception { 
ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); store.setEnabled(false); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock); refresh.setApplicationEventPublisher(eventPublisher); refresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.NOT_LOADED, refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
same here
public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); }
List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>();
public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, 
true); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); } @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", 
"fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. 
Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() 
throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void storeDisabled() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); store.setEnabled(false); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock); refresh.setApplicationEventPublisher(eventPublisher); refresh.refreshConfigurations().get(); verify(eventPublisher, 
times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.NOT_LOADED, refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, true); 
List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); } @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); 
updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new 
AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void storeDisabled() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); 
store.setConnectionString(TEST_CONN_STRING); store.setEnabled(false); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock); refresh.setApplicationEventPublisher(eventPublisher); refresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.NOT_LOADED, refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
Same comment as above: use the diamond operator — `new ArrayList<>();` — instead of repeating the type argument on the right-hand side.
public void storeDisabled() throws Exception {
    // Build a store that is explicitly switched off; refresh must skip it entirely.
    ConfigStore store = new ConfigStore();
    store.setEndpoint(TEST_STORE_NAME);
    store.setConnectionString(TEST_CONN_STRING);
    store.setEnabled(false);

    AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring();
    monitoring.setEnabled(true);

    trigger = new AppConfigurationStoreTrigger();
    trigger.setKey(WATCHED_KEYS);
    trigger.setLabel("\0");
    // Diamond operator: the element type is inferred from the declaration (fix for
    // the redundant new ArrayList<AppConfigurationStoreTrigger>()).
    List<AppConfigurationStoreTrigger> triggers = new ArrayList<>();
    triggers.add(trigger);
    monitoring.setTriggers(triggers);
    monitoring.setRefreshInterval(Duration.ofMinutes(60));
    store.setMonitoring(monitoring);

    AppConfigurationProperties properties = new AppConfigurationProperties();
    properties.setStores(Arrays.asList(store));

    AppConfigurationRefresh refresh = new AppConfigurationRefresh(properties, clientStoreMock);
    refresh.setApplicationEventPublisher(eventPublisher);
    refresh.refreshConfigurations().get();

    // A disabled store must publish no refresh event and report NOT_LOADED health.
    verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class));
    assertEquals(AppConfigurationStoreHealth.NOT_LOADED,
        refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME));
}
List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>();
public void storeDisabled() throws Exception {
    // Assemble a config store that is disabled via setEnabled(false).
    ConfigStore disabledStore = new ConfigStore();
    disabledStore.setEndpoint(TEST_STORE_NAME);
    disabledStore.setConnectionString(TEST_CONN_STRING);
    disabledStore.setEnabled(false);

    AppConfigurationStoreMonitoring storeMonitoring = new AppConfigurationStoreMonitoring();
    storeMonitoring.setEnabled(true);

    // 'trigger' is an instance field shared with the other tests in this class.
    trigger = new AppConfigurationStoreTrigger();
    trigger.setKey(WATCHED_KEYS);
    trigger.setLabel("\0");

    List<AppConfigurationStoreTrigger> triggerList = new ArrayList<>();
    triggerList.add(trigger);
    storeMonitoring.setTriggers(triggerList);
    storeMonitoring.setRefreshInterval(Duration.ofMinutes(60));
    disabledStore.setMonitoring(storeMonitoring);

    AppConfigurationProperties refreshProperties = new AppConfigurationProperties();
    refreshProperties.setStores(Arrays.asList(disabledStore));

    AppConfigurationRefresh refresh = new AppConfigurationRefresh(refreshProperties, clientStoreMock);
    refresh.setApplicationEventPublisher(eventPublisher);
    refresh.refreshConfigurations().get();

    // No event is published for a disabled store, and its health stays NOT_LOADED.
    verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class));
    assertEquals(AppConfigurationStoreHealth.NOT_LOADED,
        refresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME));
}
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, 
true); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); } @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<ConfigurationSetting>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", 
"fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<String, String>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<ConfigurationSetting>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); 
configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh 
configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<AppConfigurationStoreTrigger>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
class AppConfigurationRefreshTest { private static final String WATCHED_KEYS = "/application/*"; AppConfigurationRefresh configRefresh; @Mock private ApplicationEventPublisher eventPublisher; @Mock private AppConfigurationProperties properties; private ArrayList<ConfigurationSetting> keys; @Mock private Map<String, List<String>> contextsMap; private AppConfigurationStoreTrigger trigger; private AppConfigurationStoreMonitoring monitoring; private FeatureFlagStore featureFlagStore; @Mock private Date date; @Mock private ClientStore clientStoreMock; @Before public void setup() { MockitoAnnotations.initMocks(this); ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); monitoring = new AppConfigurationStoreMonitoring(); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); monitoring.setFeatureFlagRefreshInterval(Duration.ofMinutes(-60)); monitoring.setEnabled(true); store.setMonitoring(monitoring); featureFlagStore = new FeatureFlagStore(); store.setFeatureFlags(featureFlagStore); properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); contextsMap = new ConcurrentHashMap<>(); contextsMap.put(TEST_STORE_NAME, Arrays.asList(TEST_ETAG)); keys = new ArrayList<ConfigurationSetting>(); ConfigurationSetting kvi = new ConfigurationSetting(); kvi.setKey("fake-etag/application/test.key"); kvi.setValue("TestValue"); keys.add(kvi); ConfigurationSetting item = new ConfigurationSetting(); item.setKey("fake-etag/application/test.key"); item.setETag("fake-etag"); configRefresh = new AppConfigurationRefresh(properties, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); StateHolder.setLoadStateFeatureFlag(TEST_STORE_NAME, true); 
List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(initialResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getFeatureFlagRefreshInterval()); } @After public void cleanupMethod() { StateHolder.setState(TEST_STORE_NAME, new ArrayList<>(), monitoring.getRefreshInterval()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, new ArrayList<>(), monitoring.getFeatureFlagRefreshInterval()); } @Test public void nonUpdatedEtagShouldntPublishEvent() throws Exception { configRefresh.setApplicationEventPublisher(eventPublisher); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.UP, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } @Test public void updatedEtagShouldPublishEvent() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); 
updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setState(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void updatedFeatureFlagEtagShouldPublishEvent() throws Exception { monitoring.setEnabled(false); featureFlagStore.setEnabled(true); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(initialResponse()); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(updatedResponse()); assertTrue(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); List<ConfigurationSetting> watchKeys = new ArrayList<>(); watchKeys.add(updatedResponse()); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); HashMap<String, String> map = new HashMap<>(); map.put("store1_configuration", "fake-etag-updated"); map.put("store1_feature", "fake-etag-updated"); ConfigurationSetting updated = new ConfigurationSetting(); updated.setETag("fake-etag-updated"); watchKeys = new ArrayList<>(); watchKeys.add(updated); StateHolder.setStateFeatureFlag(TEST_STORE_NAME, watchKeys, monitoring.getRefreshInterval()); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(1)).publishEvent(any(RefreshEvent.class)); } @Test public void noEtagReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, 
times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void watchKeyThrowError() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())) .thenThrow(new RuntimeException("This would be an IO Exception. An existing connection was forcibly closed by the remote host. Test")); configRefresh.setApplicationEventPublisher(eventPublisher); Boolean sawError = false; try { assertFalse(configRefresh.refreshConfigurations().get()); } catch (RuntimeException e) { sawError = true; verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); assertEquals(AppConfigurationStoreHealth.DOWN, configRefresh.getAppConfigurationStoresHealth().get(TEST_STORE_NAME)); } assertTrue(sawError); } @Test public void nullItemsReturned() throws Exception { when(clientStoreMock.getWatchKey(Mockito.any(), Mockito.anyString())).thenReturn(null); configRefresh.setApplicationEventPublisher(eventPublisher); assertFalse(configRefresh.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void noInitialStateNoEtag() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(-60)); store.setMonitoring(monitoring); AppConfigurationProperties propertiesLost = new AppConfigurationProperties(); propertiesLost.setStores(Arrays.asList(store)); AppConfigurationRefresh configRefreshLost = new AppConfigurationRefresh(propertiesLost, clientStoreMock); StateHolder.setLoadState(TEST_STORE_NAME, true); when(clientStoreMock.getWatchKey(Mockito.any(), 
Mockito.anyString())).thenReturn(null); configRefreshLost.setApplicationEventPublisher(eventPublisher); assertFalse(configRefreshLost.refreshConfigurations().get()); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test public void notRefreshTime() throws Exception { ConfigStore store = new ConfigStore(); store.setEndpoint(TEST_STORE_NAME); store.setConnectionString(TEST_CONN_STRING); AppConfigurationStoreMonitoring monitoring = new AppConfigurationStoreMonitoring(); monitoring.setEnabled(true); trigger = new AppConfigurationStoreTrigger(); trigger.setKey(WATCHED_KEYS); trigger.setLabel("\0"); List<AppConfigurationStoreTrigger> triggers = new ArrayList<>(); triggers.add(trigger); monitoring.setTriggers(triggers); monitoring.setRefreshInterval(Duration.ofMinutes(60)); store.setMonitoring(monitoring); AppConfigurationProperties properties = new AppConfigurationProperties(); properties.setStores(Arrays.asList(store)); AppConfigurationRefresh watchLargeDelay = new AppConfigurationRefresh(properties, clientStoreMock); watchLargeDelay.setApplicationEventPublisher(eventPublisher); watchLargeDelay.refreshConfigurations().get(); verify(eventPublisher, times(0)).publishEvent(any(RefreshEvent.class)); } @Test private ConfigurationSetting initialResponse() { return new ConfigurationSetting().setETag("fake-etag"); } private ConfigurationSetting updatedResponse() { return new ConfigurationSetting().setETag("fake-etag-updated"); } }
Should this constructor validate `initialCapacity` (e.g., reject negative values with a descriptive message), or is relying on `HashMap`'s own `IllegalArgumentException` for negative capacities acceptable here?
/**
 * Create a HttpHeaders instance with an initial capacity for the backing header map.
 *
 * @param initialCapacity the initial capacity of the headers map.
 * @throws IllegalArgumentException if {@code initialCapacity} is negative.
 */
public HttpHeaders(int initialCapacity) {
    // Validate eagerly with a descriptive message. HashMap would throw the same
    // exception type for a negative capacity, so this is behavior-compatible for
    // callers catching IllegalArgumentException.
    if (initialCapacity < 0) {
        throw new IllegalArgumentException(
            "'initialCapacity' cannot be negative: " + initialCapacity);
    }
    this.headers = new HashMap<>(initialCapacity);
}
this.headers = new HashMap<>(initialCapacity);
/**
 * Create a HttpHeaders instance backed by a map with the given initial capacity.
 *
 * @param initialCapacity the initial capacity of the headers map; passed straight
 *     through to {@link java.util.HashMap}, which rejects negative values with an
 *     {@code IllegalArgumentException}.
 */
public HttpHeaders(int initialCapacity) { this.headers = new HashMap<>(initialCapacity); }
class HttpHeaders implements Iterable<HttpHeader> { private final Map<String, HttpHeader> headers; /** * Create an empty HttpHeaders instance. */ public HttpHeaders() { headers = new HashMap<>(); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the map of initial headers */ public HttpHeaders(Map<String, String> headers) { this.headers = new HashMap<>(headers.size()); headers.forEach(this::set); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the collection of initial headers */ public HttpHeaders(Iterable<HttpHeader> headers) { this.headers = new HashMap<>(); for (final HttpHeader header : headers) { this.set(header.getName(), header.getValue()); } } /** * Create a HttpHeaders instance with an initial {@code size} empty headers * * @param initialCapacity the initial capacity of headers map. */ /** * Gets the number of headers in the collection. * * @return the number of headers in this collection. */ public int getSize() { return headers.size(); } /** * Sets a {@link HttpHeader header} with the given name and value. * * <p>If header with same name already exists then the value will be overwritten.</p> * * @param name the name * @param value the value * @return The updated HttpHeaders object * @deprecated Use {@link */ @Deprecated public HttpHeaders put(String name, String value) { return set(name, value); } /** * Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then * the value will be overwritten. If the given value is null, the header with the given name will be removed. * * @param name the name to set in the header. If it is null, this method will return with no changes to the headers. 
* @param value the value * @return The updated HttpHeaders object */ public HttpHeaders set(String name, String value) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (value == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, value)); } return this; } /** * Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given * values will be comma-separated when necessary. If a header with same name already exists then the * values will be overwritten. If the given values list is null, the header with the given name will be removed. * * @param name the name * @param values the values that will be comma-separated as appropriate * @return The updated HttpHeaders object */ public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (values == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, values)); } return this; } /** * Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling * {@code headers.forEach(this::set)}, and therefore the behavior is as specified in {@link * In other words, this will create a header for each key in the provided map, replacing or removing an existing * one, depending on the value. If the given values list is null, the header with the given name will be removed. * If the given name is already a header, it will be removed and replaced with the headers provided. * * @param headers a map containing keys representing header names, and keys representing the associated values. * @return The updated HttpHeaders object */ public HttpHeaders setAll(Map<String, List<String>> headers) { headers.forEach(this::set); return this; } /** * Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't * found. 
* * @param name the name of the header to find. * @return the header if found, null otherwise. */ public HttpHeader get(String name) { return headers.get(formatKey(name)); } /** * Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header * isn't found. * * @param name the name of the header to remove. * @return the header if removed, null otherwise. */ public HttpHeader remove(String name) { return headers.remove(formatKey(name)); } /** * Get the value for the provided header name. {@code Null} is returned if the header name isn't found. * * @param name the name of the header whose value is being retrieved. * @return the value of the header, or null if the header isn't found */ public String getValue(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValue(); } /** * Get the values for the provided header name. {@code Null} is returned if the header name isn't found. * * <p>This returns {@link * * @param name the name of the header whose value is being retrieved. * @return the values of the header, or null if the header isn't found */ public String[] getValues(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValues(); } private String formatKey(final String key) { return key.toLowerCase(Locale.ROOT); } /** * Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the * headers at the time of the toMap call. This map will not change as the underlying http headers change, and nor * will modifying the key or values contained in the map have any effect on the state of the http headers. * * <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly * recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present * on the returned Map class. 
For example, use the {@link * {@code httpHeaders.toMap().get(name)}.</p> * * @return the headers in a copied and unmodifiable form. */ public Map<String, String> toMap() { final Map<String, String> result = new HashMap<>(); for (final HttpHeader header : headers.values()) { result.put(header.getName(), header.getValue()); } return Collections.unmodifiableMap(result); } /** * {@inheritDoc} */ @Override public Iterator<HttpHeader> iterator() { return headers.values().iterator(); } /** * Get a {@link Stream} representation of the HttpHeader values in this instance. * * @return A {@link Stream} of all header values in this instance. */ public Stream<HttpHeader> stream() { return headers.values().stream(); } @Override public String toString() { return this.stream() .map(header -> header.getName() + "=" + header.getValue()) .collect(Collectors.joining(", ")); } }
class HttpHeaders implements Iterable<HttpHeader> { private final Map<String, HttpHeader> headers; /** * Create an empty HttpHeaders instance. */ public HttpHeaders() { headers = new HashMap<>(); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the map of initial headers */ public HttpHeaders(Map<String, String> headers) { this.headers = new HashMap<>(headers.size()); headers.forEach(this::set); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the collection of initial headers */ public HttpHeaders(Iterable<HttpHeader> headers) { this.headers = new HashMap<>(); for (final HttpHeader header : headers) { this.set(header.getName(), header.getValue()); } } /** * Create a HttpHeaders instance with an initial {@code size} empty headers * * @param initialCapacity the initial capacity of headers map. */ /** * Gets the number of headers in the collection. * * @return the number of headers in this collection. */ public int getSize() { return headers.size(); } /** * Sets a {@link HttpHeader header} with the given name and value. * * <p>If header with same name already exists then the value will be overwritten.</p> * * @param name the name * @param value the value * @return The updated HttpHeaders object * @deprecated Use {@link */ @Deprecated public HttpHeaders put(String name, String value) { return set(name, value); } /** * Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then * the value will be overwritten. If the given value is null, the header with the given name will be removed. * * @param name the name to set in the header. If it is null, this method will return with no changes to the headers. 
* @param value the value * @return The updated HttpHeaders object */ public HttpHeaders set(String name, String value) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (value == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, value)); } return this; } /** * Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given * values will be comma-separated when necessary. If a header with same name already exists then the * values will be overwritten. If the given values list is null, the header with the given name will be removed. * * @param name the name * @param values the values that will be comma-separated as appropriate * @return The updated HttpHeaders object */ public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (values == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, values)); } return this; } /** * Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling * {@code headers.forEach(this::set)}, and therefore the behavior is as specified in {@link * In other words, this will create a header for each key in the provided map, replacing or removing an existing * one, depending on the value. If the given values list is null, the header with the given name will be removed. * If the given name is already a header, it will be removed and replaced with the headers provided. * * @param headers a map containing keys representing header names, and keys representing the associated values. * @return The updated HttpHeaders object */ public HttpHeaders setAll(Map<String, List<String>> headers) { headers.forEach(this::set); return this; } /** * Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't * found. 
* * @param name the name of the header to find. * @return the header if found, null otherwise. */ public HttpHeader get(String name) { return headers.get(formatKey(name)); } /** * Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header * isn't found. * * @param name the name of the header to remove. * @return the header if removed, null otherwise. */ public HttpHeader remove(String name) { return headers.remove(formatKey(name)); } /** * Get the value for the provided header name. {@code Null} is returned if the header name isn't found. * * @param name the name of the header whose value is being retrieved. * @return the value of the header, or null if the header isn't found */ public String getValue(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValue(); } /** * Get the values for the provided header name. {@code Null} is returned if the header name isn't found. * * <p>This returns {@link * * @param name the name of the header whose value is being retrieved. * @return the values of the header, or null if the header isn't found */ public String[] getValues(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValues(); } private String formatKey(final String key) { return key.toLowerCase(Locale.ROOT); } /** * Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the * headers at the time of the toMap call. This map will not change as the underlying http headers change, and nor * will modifying the key or values contained in the map have any effect on the state of the http headers. * * <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly * recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present * on the returned Map class. 
For example, use the {@link * {@code httpHeaders.toMap().get(name)}.</p> * * @return the headers in a copied and unmodifiable form. */ public Map<String, String> toMap() { final Map<String, String> result = new HashMap<>(); for (final HttpHeader header : headers.values()) { result.put(header.getName(), header.getValue()); } return Collections.unmodifiableMap(result); } /** * {@inheritDoc} */ @Override public Iterator<HttpHeader> iterator() { return headers.values().iterator(); } /** * Get a {@link Stream} representation of the HttpHeader values in this instance. * * @return A {@link Stream} of all header values in this instance. */ public Stream<HttpHeader> stream() { return headers.values().stream(); } @Override public String toString() { return this.stream() .map(header -> header.getName() + "=" + header.getValue()) .collect(Collectors.joining(", ")); } }
Do you mean we should validate that `initialCapacity` is less than 2^30, which is the maximum capacity of a Java `HashMap`?
public HttpHeaders(int initialCapacity) { this.headers = new HashMap<>(initialCapacity); }
this.headers = new HashMap<>(initialCapacity);
public HttpHeaders(int initialCapacity) { this.headers = new HashMap<>(initialCapacity); }
class HttpHeaders implements Iterable<HttpHeader> { private final Map<String, HttpHeader> headers; /** * Create an empty HttpHeaders instance. */ public HttpHeaders() { headers = new HashMap<>(); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the map of initial headers */ public HttpHeaders(Map<String, String> headers) { this.headers = new HashMap<>(headers.size()); headers.forEach(this::set); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the collection of initial headers */ public HttpHeaders(Iterable<HttpHeader> headers) { this.headers = new HashMap<>(); for (final HttpHeader header : headers) { this.set(header.getName(), header.getValue()); } } /** * Create a HttpHeaders instance with an initial {@code size} empty headers * * @param initialCapacity the initial capacity of headers map. */ /** * Gets the number of headers in the collection. * * @return the number of headers in this collection. */ public int getSize() { return headers.size(); } /** * Sets a {@link HttpHeader header} with the given name and value. * * <p>If header with same name already exists then the value will be overwritten.</p> * * @param name the name * @param value the value * @return The updated HttpHeaders object * @deprecated Use {@link */ @Deprecated public HttpHeaders put(String name, String value) { return set(name, value); } /** * Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then * the value will be overwritten. If the given value is null, the header with the given name will be removed. * * @param name the name to set in the header. If it is null, this method will return with no changes to the headers. 
* @param value the value * @return The updated HttpHeaders object */ public HttpHeaders set(String name, String value) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (value == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, value)); } return this; } /** * Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given * values will be comma-separated when necessary. If a header with same name already exists then the * values will be overwritten. If the given values list is null, the header with the given name will be removed. * * @param name the name * @param values the values that will be comma-separated as appropriate * @return The updated HttpHeaders object */ public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (values == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, values)); } return this; } /** * Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling * {@code headers.forEach(this::set)}, and therefore the behavior is as specified in {@link * In other words, this will create a header for each key in the provided map, replacing or removing an existing * one, depending on the value. If the given values list is null, the header with the given name will be removed. * If the given name is already a header, it will be removed and replaced with the headers provided. * * @param headers a map containing keys representing header names, and keys representing the associated values. * @return The updated HttpHeaders object */ public HttpHeaders setAll(Map<String, List<String>> headers) { headers.forEach(this::set); return this; } /** * Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't * found. 
* * @param name the name of the header to find. * @return the header if found, null otherwise. */ public HttpHeader get(String name) { return headers.get(formatKey(name)); } /** * Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header * isn't found. * * @param name the name of the header to remove. * @return the header if removed, null otherwise. */ public HttpHeader remove(String name) { return headers.remove(formatKey(name)); } /** * Get the value for the provided header name. {@code Null} is returned if the header name isn't found. * * @param name the name of the header whose value is being retrieved. * @return the value of the header, or null if the header isn't found */ public String getValue(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValue(); } /** * Get the values for the provided header name. {@code Null} is returned if the header name isn't found. * * <p>This returns {@link * * @param name the name of the header whose value is being retrieved. * @return the values of the header, or null if the header isn't found */ public String[] getValues(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValues(); } private String formatKey(final String key) { return key.toLowerCase(Locale.ROOT); } /** * Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the * headers at the time of the toMap call. This map will not change as the underlying http headers change, and nor * will modifying the key or values contained in the map have any effect on the state of the http headers. * * <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly * recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present * on the returned Map class. 
For example, use the {@link * {@code httpHeaders.toMap().get(name)}.</p> * * @return the headers in a copied and unmodifiable form. */ public Map<String, String> toMap() { final Map<String, String> result = new HashMap<>(); for (final HttpHeader header : headers.values()) { result.put(header.getName(), header.getValue()); } return Collections.unmodifiableMap(result); } /** * {@inheritDoc} */ @Override public Iterator<HttpHeader> iterator() { return headers.values().iterator(); } /** * Get a {@link Stream} representation of the HttpHeader values in this instance. * * @return A {@link Stream} of all header values in this instance. */ public Stream<HttpHeader> stream() { return headers.values().stream(); } @Override public String toString() { return this.stream() .map(header -> header.getName() + "=" + header.getValue()) .collect(Collectors.joining(", ")); } }
class HttpHeaders implements Iterable<HttpHeader> { private final Map<String, HttpHeader> headers; /** * Create an empty HttpHeaders instance. */ public HttpHeaders() { headers = new HashMap<>(); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the map of initial headers */ public HttpHeaders(Map<String, String> headers) { this.headers = new HashMap<>(headers.size()); headers.forEach(this::set); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the collection of initial headers */ public HttpHeaders(Iterable<HttpHeader> headers) { this.headers = new HashMap<>(); for (final HttpHeader header : headers) { this.set(header.getName(), header.getValue()); } } /** * Create a HttpHeaders instance with an initial {@code size} empty headers * * @param initialCapacity the initial capacity of headers map. */ /** * Gets the number of headers in the collection. * * @return the number of headers in this collection. */ public int getSize() { return headers.size(); } /** * Sets a {@link HttpHeader header} with the given name and value. * * <p>If header with same name already exists then the value will be overwritten.</p> * * @param name the name * @param value the value * @return The updated HttpHeaders object * @deprecated Use {@link */ @Deprecated public HttpHeaders put(String name, String value) { return set(name, value); } /** * Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then * the value will be overwritten. If the given value is null, the header with the given name will be removed. * * @param name the name to set in the header. If it is null, this method will return with no changes to the headers. 
* @param value the value * @return The updated HttpHeaders object */ public HttpHeaders set(String name, String value) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (value == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, value)); } return this; } /** * Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given * values will be comma-separated when necessary. If a header with same name already exists then the * values will be overwritten. If the given values list is null, the header with the given name will be removed. * * @param name the name * @param values the values that will be comma-separated as appropriate * @return The updated HttpHeaders object */ public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (values == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, values)); } return this; } /** * Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling * {@code headers.forEach(this::set)}, and therefore the behavior is as specified in {@link * In other words, this will create a header for each key in the provided map, replacing or removing an existing * one, depending on the value. If the given values list is null, the header with the given name will be removed. * If the given name is already a header, it will be removed and replaced with the headers provided. * * @param headers a map containing keys representing header names, and keys representing the associated values. * @return The updated HttpHeaders object */ public HttpHeaders setAll(Map<String, List<String>> headers) { headers.forEach(this::set); return this; } /** * Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't * found. 
* * @param name the name of the header to find. * @return the header if found, null otherwise. */ public HttpHeader get(String name) { return headers.get(formatKey(name)); } /** * Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header * isn't found. * * @param name the name of the header to remove. * @return the header if removed, null otherwise. */ public HttpHeader remove(String name) { return headers.remove(formatKey(name)); } /** * Get the value for the provided header name. {@code Null} is returned if the header name isn't found. * * @param name the name of the header whose value is being retrieved. * @return the value of the header, or null if the header isn't found */ public String getValue(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValue(); } /** * Get the values for the provided header name. {@code Null} is returned if the header name isn't found. * * <p>This returns {@link * * @param name the name of the header whose value is being retrieved. * @return the values of the header, or null if the header isn't found */ public String[] getValues(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValues(); } private String formatKey(final String key) { return key.toLowerCase(Locale.ROOT); } /** * Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the * headers at the time of the toMap call. This map will not change as the underlying http headers change, and nor * will modifying the key or values contained in the map have any effect on the state of the http headers. * * <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly * recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present * on the returned Map class. 
For example, use the {@link * {@code httpHeaders.toMap().get(name)}.</p> * * @return the headers in a copied and unmodifiable form. */ public Map<String, String> toMap() { final Map<String, String> result = new HashMap<>(); for (final HttpHeader header : headers.values()) { result.put(header.getName(), header.getValue()); } return Collections.unmodifiableMap(result); } /** * {@inheritDoc} */ @Override public Iterator<HttpHeader> iterator() { return headers.values().iterator(); } /** * Get a {@link Stream} representation of the HttpHeader values in this instance. * * @return A {@link Stream} of all header values in this instance. */ public Stream<HttpHeader> stream() { return headers.values().stream(); } @Override public String toString() { return this.stream() .map(header -> header.getName() + "=" + header.getValue()) .collect(Collectors.joining(", ")); } }
Actually, let's ignore this and let `HashMap` handle throwing the error if the `initialCapacity` is invalid.
public HttpHeaders(int initialCapacity) { this.headers = new HashMap<>(initialCapacity); }
this.headers = new HashMap<>(initialCapacity);
public HttpHeaders(int initialCapacity) { this.headers = new HashMap<>(initialCapacity); }
class HttpHeaders implements Iterable<HttpHeader> { private final Map<String, HttpHeader> headers; /** * Create an empty HttpHeaders instance. */ public HttpHeaders() { headers = new HashMap<>(); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the map of initial headers */ public HttpHeaders(Map<String, String> headers) { this.headers = new HashMap<>(headers.size()); headers.forEach(this::set); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the collection of initial headers */ public HttpHeaders(Iterable<HttpHeader> headers) { this.headers = new HashMap<>(); for (final HttpHeader header : headers) { this.set(header.getName(), header.getValue()); } } /** * Create a HttpHeaders instance with an initial {@code size} empty headers * * @param initialCapacity the initial capacity of headers map. */ /** * Gets the number of headers in the collection. * * @return the number of headers in this collection. */ public int getSize() { return headers.size(); } /** * Sets a {@link HttpHeader header} with the given name and value. * * <p>If header with same name already exists then the value will be overwritten.</p> * * @param name the name * @param value the value * @return The updated HttpHeaders object * @deprecated Use {@link */ @Deprecated public HttpHeaders put(String name, String value) { return set(name, value); } /** * Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then * the value will be overwritten. If the given value is null, the header with the given name will be removed. * * @param name the name to set in the header. If it is null, this method will return with no changes to the headers. 
* @param value the value * @return The updated HttpHeaders object */ public HttpHeaders set(String name, String value) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (value == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, value)); } return this; } /** * Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given * values will be comma-separated when necessary. If a header with same name already exists then the * values will be overwritten. If the given values list is null, the header with the given name will be removed. * * @param name the name * @param values the values that will be comma-separated as appropriate * @return The updated HttpHeaders object */ public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (values == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, values)); } return this; } /** * Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling * {@code headers.forEach(this::set)}, and therefore the behavior is as specified in {@link * In other words, this will create a header for each key in the provided map, replacing or removing an existing * one, depending on the value. If the given values list is null, the header with the given name will be removed. * If the given name is already a header, it will be removed and replaced with the headers provided. * * @param headers a map containing keys representing header names, and keys representing the associated values. * @return The updated HttpHeaders object */ public HttpHeaders setAll(Map<String, List<String>> headers) { headers.forEach(this::set); return this; } /** * Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't * found. 
* * @param name the name of the header to find. * @return the header if found, null otherwise. */ public HttpHeader get(String name) { return headers.get(formatKey(name)); } /** * Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header * isn't found. * * @param name the name of the header to remove. * @return the header if removed, null otherwise. */ public HttpHeader remove(String name) { return headers.remove(formatKey(name)); } /** * Get the value for the provided header name. {@code Null} is returned if the header name isn't found. * * @param name the name of the header whose value is being retrieved. * @return the value of the header, or null if the header isn't found */ public String getValue(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValue(); } /** * Get the values for the provided header name. {@code Null} is returned if the header name isn't found. * * <p>This returns {@link * * @param name the name of the header whose value is being retrieved. * @return the values of the header, or null if the header isn't found */ public String[] getValues(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValues(); } private String formatKey(final String key) { return key.toLowerCase(Locale.ROOT); } /** * Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the * headers at the time of the toMap call. This map will not change as the underlying http headers change, and nor * will modifying the key or values contained in the map have any effect on the state of the http headers. * * <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly * recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present * on the returned Map class. 
For example, use the {@link * {@code httpHeaders.toMap().get(name)}.</p> * * @return the headers in a copied and unmodifiable form. */ public Map<String, String> toMap() { final Map<String, String> result = new HashMap<>(); for (final HttpHeader header : headers.values()) { result.put(header.getName(), header.getValue()); } return Collections.unmodifiableMap(result); } /** * {@inheritDoc} */ @Override public Iterator<HttpHeader> iterator() { return headers.values().iterator(); } /** * Get a {@link Stream} representation of the HttpHeader values in this instance. * * @return A {@link Stream} of all header values in this instance. */ public Stream<HttpHeader> stream() { return headers.values().stream(); } @Override public String toString() { return this.stream() .map(header -> header.getName() + "=" + header.getValue()) .collect(Collectors.joining(", ")); } }
class HttpHeaders implements Iterable<HttpHeader> { private final Map<String, HttpHeader> headers; /** * Create an empty HttpHeaders instance. */ public HttpHeaders() { headers = new HashMap<>(); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the map of initial headers */ public HttpHeaders(Map<String, String> headers) { this.headers = new HashMap<>(headers.size()); headers.forEach(this::set); } /** * Create a HttpHeaders instance with the provided initial headers. * * @param headers the collection of initial headers */ public HttpHeaders(Iterable<HttpHeader> headers) { this.headers = new HashMap<>(); for (final HttpHeader header : headers) { this.set(header.getName(), header.getValue()); } } /** * Create a HttpHeaders instance with an initial {@code size} empty headers * * @param initialCapacity the initial capacity of headers map. */ /** * Gets the number of headers in the collection. * * @return the number of headers in this collection. */ public int getSize() { return headers.size(); } /** * Sets a {@link HttpHeader header} with the given name and value. * * <p>If header with same name already exists then the value will be overwritten.</p> * * @param name the name * @param value the value * @return The updated HttpHeaders object * @deprecated Use {@link */ @Deprecated public HttpHeaders put(String name, String value) { return set(name, value); } /** * Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then * the value will be overwritten. If the given value is null, the header with the given name will be removed. * * @param name the name to set in the header. If it is null, this method will return with no changes to the headers. 
* @param value the value * @return The updated HttpHeaders object */ public HttpHeaders set(String name, String value) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (value == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, value)); } return this; } /** * Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given * values will be comma-separated when necessary. If a header with same name already exists then the * values will be overwritten. If the given values list is null, the header with the given name will be removed. * * @param name the name * @param values the values that will be comma-separated as appropriate * @return The updated HttpHeaders object */ public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } String caseInsensitiveName = formatKey(name); if (values == null) { remove(caseInsensitiveName); } else { headers.put(caseInsensitiveName, new HttpHeader(name, values)); } return this; } /** * Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling * {@code headers.forEach(this::set)}, and therefore the behavior is as specified in {@link * In other words, this will create a header for each key in the provided map, replacing or removing an existing * one, depending on the value. If the given values list is null, the header with the given name will be removed. * If the given name is already a header, it will be removed and replaced with the headers provided. * * @param headers a map containing keys representing header names, and keys representing the associated values. * @return The updated HttpHeaders object */ public HttpHeaders setAll(Map<String, List<String>> headers) { headers.forEach(this::set); return this; } /** * Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't * found. 
* * @param name the name of the header to find. * @return the header if found, null otherwise. */ public HttpHeader get(String name) { return headers.get(formatKey(name)); } /** * Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header * isn't found. * * @param name the name of the header to remove. * @return the header if removed, null otherwise. */ public HttpHeader remove(String name) { return headers.remove(formatKey(name)); } /** * Get the value for the provided header name. {@code Null} is returned if the header name isn't found. * * @param name the name of the header whose value is being retrieved. * @return the value of the header, or null if the header isn't found */ public String getValue(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValue(); } /** * Get the values for the provided header name. {@code Null} is returned if the header name isn't found. * * <p>This returns {@link * * @param name the name of the header whose value is being retrieved. * @return the values of the header, or null if the header isn't found */ public String[] getValues(String name) { final HttpHeader header = get(name); return header == null ? null : header.getValues(); } private String formatKey(final String key) { return key.toLowerCase(Locale.ROOT); } /** * Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the * headers at the time of the toMap call. This map will not change as the underlying http headers change, and nor * will modifying the key or values contained in the map have any effect on the state of the http headers. * * <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly * recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present * on the returned Map class. 
For example, use the {@link * {@code httpHeaders.toMap().get(name)}.</p> * * @return the headers in a copied and unmodifiable form. */ public Map<String, String> toMap() { final Map<String, String> result = new HashMap<>(); for (final HttpHeader header : headers.values()) { result.put(header.getName(), header.getValue()); } return Collections.unmodifiableMap(result); } /** * {@inheritDoc} */ @Override public Iterator<HttpHeader> iterator() { return headers.values().iterator(); } /** * Get a {@link Stream} representation of the HttpHeader values in this instance. * * @return A {@link Stream} of all header values in this instance. */ public Stream<HttpHeader> stream() { return headers.values().stream(); } @Override public String toString() { return this.stream() .map(header -> header.getName() + "=" + header.getValue()) .collect(Collectors.joining(", ")); } }
Can we delete this, and add java doc in it's parent method?
public String getAlgorithmName() { return RSA_ALGORITHM; }
public String getAlgorithmName() { return "RSASSA-PSS"; }
class KeyVaultKeyLessRsaSignature extends AbstractKeyVaultKeyLessSignature { /** * The default algorithm for certificate sign which Key Type is RSA in key Vault will be used */ public static final String RSA_ALGORITHM = "RSASSA-PSS"; /** * Construct a new KeyVaultKeyLessRsaSignature */ public KeyVaultKeyLessRsaSignature() { super(); this.messageDigest = null; } @Override protected byte[] engineSign() { byte[] mHash = getDigestValue(); String encode = Base64.getEncoder().encodeToString(mHash); return keyVaultClient.getSignedWithPrivateKey("PS256", encode, keyId); } @Override protected void engineSetParameter(AlgorithmParameterSpec params) throws InvalidAlgorithmParameterException { if (params != null && !(params instanceof PSSParameterSpec)) { throw new InvalidAlgorithmParameterException("No parameter accepted"); } PSSParameterSpec signatureParameters = (PSSParameterSpec) params; String newHashAlg = signatureParameters != null ? signatureParameters.getDigestAlgorithm() : null; if ((this.messageDigest == null) || !(this.messageDigest.getAlgorithm().equalsIgnoreCase(newHashAlg))) { try { this.messageDigest = MessageDigest.getInstance(newHashAlg); } catch (NoSuchAlgorithmException exception) { throw new InvalidAlgorithmParameterException("Unsupported digest algorithm " + newHashAlg, exception); } } } @Override }
class KeyVaultKeyLessRsaSignature extends AbstractKeyVaultKeyLessSignature { /** * Construct a new KeyVaultKeyLessRsaSignature */ public KeyVaultKeyLessRsaSignature() { super(); this.messageDigest = null; } @Override protected byte[] engineSign() { byte[] mHash = getDigestValue(); String encode = Base64.getEncoder().encodeToString(mHash); return keyVaultClient.getSignedWithPrivateKey("PS256", encode, keyId); } @Override protected void engineSetParameter(AlgorithmParameterSpec params) throws InvalidAlgorithmParameterException { if (params != null && !(params instanceof PSSParameterSpec)) { throw new InvalidAlgorithmParameterException("No parameter accepted"); } PSSParameterSpec signatureParameters = (PSSParameterSpec) params; String newHashAlg = signatureParameters != null ? signatureParameters.getDigestAlgorithm() : null; if ((this.messageDigest == null) || !(this.messageDigest.getAlgorithm().equalsIgnoreCase(newHashAlg))) { try { this.messageDigest = MessageDigest.getInstance(newHashAlg); } catch (NoSuchAlgorithmException exception) { throw new InvalidAlgorithmParameterException("Unsupported digest algorithm " + newHashAlg, exception); } } } @Override }
It's possible the instrumentationManager could be null?
public ServiceBusQueueHealthIndicator(ServiceBusQueueOperation serviceBusQueueOperation) { super("Service bus health check failed"); this.instrumentationManager = serviceBusQueueOperation.getInstrumentationManager(); }
this.instrumentationManager = serviceBusQueueOperation.getInstrumentationManager();
public ServiceBusQueueHealthIndicator(ServiceBusQueueOperation serviceBusQueueOperation) { super("Service bus health check failed"); this.instrumentationManager = serviceBusQueueOperation.getInstrumentationManager(); }
class ServiceBusQueueHealthIndicator extends AbstractHealthIndicator { private final InstrumentationManager instrumentationManager; @Override protected void doHealthCheck(Health.Builder builder) { if (instrumentationManager.getHealthInstrumentations().isEmpty()) { builder.unknown(); return; } if (instrumentationManager.getHealthInstrumentations().stream() .allMatch(Instrumentation::isUp)) { builder.up(); return; } if (instrumentationManager.getHealthInstrumentations().stream() .allMatch(Instrumentation::isOutOfService)) { builder.outOfService(); return; } builder.down(); instrumentationManager.getHealthInstrumentations().stream() .filter(instrumentation -> !instrumentation.isStarted()) .forEach(instrumentation -> builder .withDetail(instrumentation.getName() + ":" + instrumentation.getType().getTypeName(), instrumentation.getStartException())); } }
class ServiceBusQueueHealthIndicator extends AbstractHealthIndicator { private final InstrumentationManager instrumentationManager; @Override protected void doHealthCheck(Health.Builder builder) { if (instrumentationManager == null || instrumentationManager.getHealthInstrumentations().isEmpty()) { builder.unknown(); return; } if (instrumentationManager.getHealthInstrumentations().stream() .allMatch(Instrumentation::isUp)) { builder.up(); return; } if (instrumentationManager.getHealthInstrumentations().stream() .allMatch(Instrumentation::isOutOfService)) { builder.outOfService(); return; } builder.down(); instrumentationManager.getHealthInstrumentations().stream() .filter(instrumentation -> !instrumentation.isStarted()) .forEach(instrumentation -> builder .withDetail(instrumentation.getName() + ":" + instrumentation.getType().getTypeName(), instrumentation.getStartException())); } }
`customPolicies` can be null. If it is null, no additional policies are added to the pipeline.
public CommunicationRelayAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'ednpoint' cannot be null."); Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null."); return new CommunicationRelayAsyncClient(createServiceImpl()); }
Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null.");
public CommunicationRelayAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return new CommunicationRelayAsyncClient(createServiceImpl()); }
class CommunicationRelayClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String COMMUNICATION_IDENTITY_PROPERTIES = "azure-communication-networktravesal.properties"; private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class); private String endpoint; private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; private ClientOptions clientOptions; private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES); private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>(); /** * Set endpoint of the service * * @param endpoint url of the service * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /** * Set endpoint of the service * * @param pipeline HttpPipeline to use, if a pipeline is not * supplied, the credential and httpClient fields must be set * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. * * @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests. 
* @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) { this.azureKeyCredential = keyCredential; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Set httpClient to use * * @param httpClient httpClient to use, overridden by the pipeline * field. * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Apply additional HttpPipelinePolicy * * @param customPolicy HttpPipelinePolicy object to be applied after * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(customPolicy); return this; } /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. 
* * @param configuration Configuration store used to retrieve environment configurations. * @return the updated CommunicationRelayClientBuilder object */ public CommunicationRelayClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated CommunicationRelayClientBuilder object */ public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = logOptions; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * @param retryPolicy User's retry policy applied to each request. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests. * @return the updated CommunicationRelayClientBuilder object */ public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) { return this; } /** * Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. 
* Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationRelayAsyncClient instance */ /** * Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. * Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationRelayClient instance */ public CommunicationRelayClient buildClient() { Objects.requireNonNull(endpoint, "'ednpoint' cannot be null."); Objects.requireNonNull(customPolicies, "'customPolicies' cannot be null."); return new CommunicationRelayClient(buildAsyncClient()); } private CommunicationNetworkingClientImpl createServiceImpl() { HttpPipeline builderPipeline = this.pipeline; if (this.pipeline == null) { builderPipeline = createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies); } CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder(); clientBuilder.endpoint(endpoint) .pipeline(builderPipeline); return clientBuilder.buildClient(); } private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null && this.azureKeyCredential != null) { throw logger.logExceptionAsError( new IllegalArgumentException("Both 'credential' and 'accessKey' are set. 
Just one may be used.")); } if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> customPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); applyRequiredPolicies(policies, authorizationPolicy); if (customPolicies != null && customPolicies.size() > 0) { policies.addAll(customPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) { String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions; HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions; String applicationId = null; if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) { applicationId = buildClientOptions.getApplicationId(); } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) { applicationId = buildLogOptions.getApplicationId(); } policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration)); policies.add(new RequestIdPolicy()); policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy); policies.add(new CookiePolicy()); policies.add(authorizationPolicy); policies.add(new HttpLoggingPolicy(httpLogOptions)); } }
class CommunicationRelayClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES = "azure-communication-networktraversal.properties"; private final ClientLogger logger = new ClientLogger(CommunicationRelayClientBuilder.class); private String endpoint; private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private HttpPipeline pipeline; private RetryPolicy retryPolicy; private Configuration configuration; private ClientOptions clientOptions; private String connectionString; private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_NETWORK_TRAVERSAL_PROPERTIES); private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>(); /** * Set endpoint of the service * * @param endpoint url of the service * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /** * Set endpoint of the service * * @param pipeline HttpPipeline to use, if a pipeline is not * supplied, the credential and httpClient fields must be set * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. 
* * @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder credential(AzureKeyCredential keyCredential) { this.azureKeyCredential = keyCredential; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder connectionString(String connectionString) { CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Set httpClient to use * * @param httpClient httpClient to use, overridden by the pipeline * field. * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Apply additional HttpPipelinePolicy * * @param customPolicy HttpPipelinePolicy object to be applied after * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy * @return CommunicationRelayClientBuilder */ public CommunicationRelayClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(customPolicy); return this; } /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. 
* * @param configuration Configuration store used to retrieve environment configurations. * @return the updated CommunicationRelayClientBuilder object */ public CommunicationRelayClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated CommunicationRelayClientBuilder object */ public CommunicationRelayClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = logOptions; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * @param retryPolicy User's retry policy applied to each request. * @return The updated {@link CommunicationRelayClientBuilder} object. */ public CommunicationRelayClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link CommunicationRelayServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link CommunicationRelayServiceVersion} of the service to be used when making requests. * @return the updated CommunicationRelayClientBuilder object */ public CommunicationRelayClientBuilder serviceVersion(CommunicationRelayServiceVersion version) { return this; } /** * Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. 
* Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationRelayAsyncClient instance */ /** * Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. * Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationRelayClient instance */ public CommunicationRelayClient buildClient() { return new CommunicationRelayClient(buildAsyncClient()); } private CommunicationNetworkingClientImpl createServiceImpl() { HttpPipeline builderPipeline = this.pipeline; if (this.pipeline == null) { builderPipeline = createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies); } CommunicationNetworkingClientImplBuilder clientBuilder = new CommunicationNetworkingClientImplBuilder(); clientBuilder.endpoint(endpoint) .pipeline(builderPipeline); return clientBuilder.buildClient(); } private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null && this.azureKeyCredential != null) { throw logger.logExceptionAsError( new IllegalArgumentException("Both 'credential' and 'accessKey' are set. 
Just one may be used.")); } if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> customPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); applyRequiredPolicies(policies, authorizationPolicy); if (customPolicies != null && customPolicies.size() > 0) { policies.addAll(customPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies, HttpPipelinePolicy authorizationPolicy) { String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions; HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions; String applicationId = null; if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) { applicationId = buildClientOptions.getApplicationId(); } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) { applicationId = buildLogOptions.getApplicationId(); } policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration)); policies.add(new RequestIdPolicy()); policies.add(this.retryPolicy == null ? new RetryPolicy() : this.retryPolicy); policies.add(new CookiePolicy()); policies.add(authorizationPolicy); policies.add(new HttpLoggingPolicy(httpLogOptions)); } }
We should allow for setting `null` values here and only validate in build*Client method.
public SearchClientBuilder credential(AzureKeyCredential credential) { this.azureKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; }
this.azureKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
public SearchClientBuilder credential(AzureKeyCredential credential) { this.azureKeyCredential = credential; return this; }
class SearchClientBuilder { private static final boolean DEFAULT_AUTO_FLUSH = true; private static final int DEFAULT_INITIAL_BATCH_ACTION_COUNT = 512; private static final Duration DEFAULT_FLUSH_INTERVAL = Duration.ofSeconds(60); private static final int DEFAULT_MAX_RETRIES_PER_ACTION = 3; private static final Duration DEFAULT_THROTTLING_DELAY = Duration.ofMillis(800); private static final Duration DEFAULT_MAX_THROTTLING_DELAY = Duration.ofMinutes(1); private final ClientLogger logger = new ClientLogger(SearchClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private SearchServiceVersion serviceVersion; private String endpoint; private HttpClient httpClient; private HttpPipeline httpPipeline; private ClientOptions clientOptions; private HttpLogOptions httpLogOptions; private Configuration configuration; private String indexName; private RetryPolicy retryPolicy; private JsonSerializer jsonSerializer; /** * Creates a builder instance that is able to configure and construct {@link SearchClient SearchClients} and {@link * SearchAsyncClient SearchAsyncClients}. */ public SearchClientBuilder() { } /** * Creates a {@link SearchClient} based on options set in the builder. Every time {@code buildClient()} is called a * new instance of {@link SearchClient} is created. * <p> * If {@link * endpoint}, and {@link * builder settings are ignored. * * @return A SearchClient with the options set from the builder. * @throws NullPointerException If {@code indexName} or {@code endpoint} are null. */ public SearchClient buildClient() { return new SearchClient(buildAsyncClient()); } /** * Creates a {@link SearchAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link SearchAsyncClient} is created. 
* <p> * If {@link * endpoint}, and {@link * other builder settings are ignored. * * @return A SearchClient with the options set from the builder. * @throws NullPointerException If {@code indexName} or {@code endpoint} are null. */ public SearchAsyncClient buildAsyncClient() { validateIndexNameAndEndpoint(); SearchServiceVersion buildVersion = (serviceVersion == null) ? SearchServiceVersion.getLatest() : serviceVersion; HttpPipeline pipeline = getHttpPipeline(); return new SearchAsyncClient(endpoint, indexName, buildVersion, pipeline, jsonSerializer, Utility.buildRestClient(endpoint, indexName, pipeline, getDefaultSerializerAdapter())); } /** * Create a new instance of {@link SearchIndexingBufferedSenderBuilder} used to configure {@link * SearchIndexingBufferedSender SearchIndexingBufferedSenders} and {@link SearchIndexingBufferedAsyncSender * SearchIndexingBufferedAsyncSenders}. * * @param documentType The {@link TypeReference} representing the document type associated with the sender. * @param <T> The type of the document that the buffered sender will use. * @return A new instance of {@link SearchIndexingBufferedSenderBuilder}. */ public <T> SearchIndexingBufferedSenderBuilder<T> bufferedSender(TypeReference<T> documentType) { return new SearchIndexingBufferedSenderBuilder<>(); } private void validateIndexNameAndEndpoint() { Objects.requireNonNull(indexName, "'indexName' cannot be null."); Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); } private HttpPipeline getHttpPipeline() { if (httpPipeline != null) { return httpPipeline; } return Utility.buildHttpPipeline(clientOptions, httpLogOptions, configuration, retryPolicy, azureKeyCredential, tokenCredential, perCallPolicies, perRetryPolicies, httpClient, logger); } /** * Sets the service endpoint for the Azure Cognitive Search instance. * * @param endpoint The URL of the Azure Cognitive Search instance. * @return The updated SearchClientBuilder object. 
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ public SearchClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. * * @param credential The {@link AzureKeyCredential} used to authenticate HTTP requests. * @return The updated SearchClientBuilder object. * @throws NullPointerException If {@code credential} is null. * @throws IllegalArgumentException If {@link AzureKeyCredential */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param credential The {@link TokenCredential} used to authenticate HTTP requests. * @return The updated SearchClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public SearchClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Sets the name of the index. * * @param indexName Name of the index. * @return The updated SearchClientBuilder object. * @throws IllegalArgumentException If {@code indexName} is null or empty. */ public SearchClientBuilder indexName(String indexName) { if (CoreUtils.isNullOrEmpty(indexName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'indexName' cannot be null or empty.")); } this.indexName = indexName; return this; } /** * Sets the logging configuration for HTTP requests and responses. * <p> * If logging configurations aren't provided HTTP requests and responses won't be logged. * * @param logOptions The logging configuration for HTTP requests and responses. * @return The updated SearchClientBuilder object. 
*/ public SearchClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Gets the default Azure Search headers and query parameters allow list. * * @return The default {@link HttpLogOptions} allow list. */ public static HttpLogOptions getDefaultLogOptions() { return Constants.DEFAULT_LOG_OPTIONS_SUPPLIER.get(); } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Adds a pipeline policy to apply to each request sent. * <p> * This method may be called multiple times, each time it is called the policy will be added to the end of added * policy list. All policies will be added after the retry policy. * * @param policy The pipeline policies to added to the policy list. * @return The updated SearchClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public SearchClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Custom JSON serializer that is used to handle model types that are not contained in the Azure Search Documents * library. * * @param jsonSerializer The serializer to serialize user defined models. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder serializer(JsonSerializer jsonSerializer) { this.jsonSerializer = jsonSerializer; return this; } /** * Sets the HTTP client to use for sending requests and receiving responses. * * @param client The HTTP client that will handle sending requests and receiving responses. * @return The updated SearchClientBuilder object. 
*/ public SearchClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store that will be used. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that will attempt to retry requests when needed. * <p> * A default retry policy will be supplied if one isn't provided. * * @param retryPolicy The {@link RetryPolicy} that will attempt to retry requests when needed. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link SearchServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, {@link SearchServiceVersion * the default is used, updating to a newer client library may implicitly use a newer version of the service. 
* * @param serviceVersion The version of the service to be used when making requests. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder serviceVersion(SearchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /** * This class provides a fluent builder API to help aid the configuration and instantiation of {@link * SearchIndexingBufferedSender SearchIndexingBufferedSenders} and {@link SearchIndexingBufferedAsyncSender * SearchIndexingBufferedAsyncSenders}. Call {@link * construct an instance of the desired sender. * * @param <T> The type of the document that the buffered sender will use. * @see SearchIndexingBufferedSender * @see SearchIndexingBufferedAsyncSender */ @ServiceClientBuilder(serviceClients = { SearchIndexingBufferedSender.class, SearchIndexingBufferedAsyncSender.class }) public final class SearchIndexingBufferedSenderBuilder<T> { private final ClientLogger logger = new ClientLogger(SearchIndexingBufferedSenderBuilder.class); private Function<T, String> documentKeyRetriever; private boolean autoFlush = DEFAULT_AUTO_FLUSH; private Duration autoFlushInterval = DEFAULT_FLUSH_INTERVAL; private int initialBatchActionCount = DEFAULT_INITIAL_BATCH_ACTION_COUNT; private int maxRetriesPerAction = DEFAULT_MAX_RETRIES_PER_ACTION; private Duration throttlingDelay = DEFAULT_THROTTLING_DELAY; private Duration maxThrottlingDelay = DEFAULT_MAX_THROTTLING_DELAY; private Consumer<OnActionAddedOptions<T>> onActionAddedConsumer; private Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer; private Consumer<OnActionErrorOptions<T>> onActionErrorConsumer; private Consumer<OnActionSentOptions<T>> onActionSentConsumer; private SearchIndexingBufferedSenderBuilder() { } /** * Creates a {@link SearchIndexingBufferedSender} based on options set in the builder. Every time this is called * a new instance of {@link SearchIndexingBufferedSender} is created. 
* * @return A SearchIndexingBufferedSender with the options set from the builder. * @throws NullPointerException If {@code indexName}, {@code endpoint}, or {@code documentKeyRetriever} are * null. */ public SearchIndexingBufferedSender<T> buildSender() { return new SearchIndexingBufferedSender<>(buildAsyncSender()); } /** * Creates a {@link SearchIndexingBufferedAsyncSender} based on options set in the builder. Every time this is * called a new instance of {@link SearchIndexingBufferedAsyncSender} is created. * * @return A SearchIndexingBufferedAsyncSender with the options set from the builder. * @throws NullPointerException If {@code indexName}, {@code endpoint}, or {@code documentKeyRetriever} are * null. */ public SearchIndexingBufferedAsyncSender<T> buildAsyncSender() { validateIndexNameAndEndpoint(); Objects.requireNonNull(documentKeyRetriever, "'documentKeyRetriever' cannot be null"); return new SearchIndexingBufferedAsyncSender<>(buildRestClient(endpoint, indexName, getHttpPipeline(), getDefaultSerializerAdapter()), jsonSerializer, documentKeyRetriever, autoFlush, autoFlushInterval, initialBatchActionCount, maxRetriesPerAction, throttlingDelay, maxThrottlingDelay, onActionAddedConsumer, onActionSucceededConsumer, onActionErrorConsumer, onActionSentConsumer); } /** * Sets the flag determining whether a buffered sender will automatically flush its document batch based on the * configurations of {@link * * @param autoFlush Flag determining whether a buffered sender will automatically flush. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> autoFlush(boolean autoFlush) { this.autoFlush = autoFlush; return this; } /** * Sets the duration between a buffered sender sending documents to be indexed. 
* <p> * The buffered sender will reset the duration when documents are sent for indexing, either by reaching {@link * * <p> * If {@code autoFlushInterval} is negative or zero and {@link * sender will only flush when {@link * * @param autoFlushInterval Duration between document batches being sent for indexing. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws NullPointerException If {@code autoFlushInterval} is null. */ public SearchIndexingBufferedSenderBuilder<T> autoFlushInterval(Duration autoFlushInterval) { Objects.requireNonNull(autoFlushInterval, "'autoFlushInterval' cannot be null."); this.autoFlushInterval = autoFlushInterval; return this; } /** * Sets the number of documents before a buffered sender will send the batch to be indexed. * <p> * This will only trigger a batch to be sent automatically if {@link * value is {@code 512}. * * @param initialBatchActionCount The number of documents in a batch that will trigger it to be indexed. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code batchSize} is less than one. */ public SearchIndexingBufferedSenderBuilder<T> initialBatchActionCount(int initialBatchActionCount) { if (initialBatchActionCount < 1) { throw logger.logExceptionAsError(new IllegalArgumentException("'batchSize' cannot be less than one.")); } this.initialBatchActionCount = initialBatchActionCount; return this; } /** * Sets the number of times an action will retry indexing before it is considered failed. * <p> * Documents are only retried on retryable status codes. * <p> * Default value is {@code 3}. * * @param maxRetriesPerAction The number of times a document will retry indexing before it is considered * failed. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code maxRetriesPerAction} is less than one. 
*/ public SearchIndexingBufferedSenderBuilder<T> maxRetriesPerAction(int maxRetriesPerAction) { if (maxRetriesPerAction < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxRetries' cannot be less than one.")); } this.maxRetriesPerAction = maxRetriesPerAction; return this; } /** * Sets the initial duration that requests will be delayed when the service is throttling. * <p> * Default value is {@code Duration.ofMillis(800)}. * * @param throttlingDelay The initial duration requests will delay when the service is throttling. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code throttlingDelay.isNegative()} or {@code throttlingDelay.isZero()} * is true. * @throws NullPointerException If {@code throttlingDelay} is null. */ public SearchIndexingBufferedSenderBuilder<T> throttlingDelay(Duration throttlingDelay) { Objects.requireNonNull(throttlingDelay, "'throttlingDelay' cannot be null."); if (throttlingDelay.isNegative() || throttlingDelay.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'throttlingDelay' cannot be negative or zero.")); } this.throttlingDelay = throttlingDelay; return this; } /** * Sets the maximum duration that requests will be delayed when the service is throttling. * <p> * If {@code maxThrottlingDelay} is less than {@link * * <p> * Default value is {@code Duration.ofMinutes(1)}. * * @param maxThrottlingDelay The maximum duration requests will delay when the service is throttling. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code maxThrottlingDelay.isNegative()} or {@code * maxThrottlingDelay.isZero()} is true. * @throws NullPointerException If {@code maxThrottlingDelay} is null. 
*/ public SearchIndexingBufferedSenderBuilder<T> maxThrottlingDelay(Duration maxThrottlingDelay) { Objects.requireNonNull(maxThrottlingDelay, "'maxThrottlingDelay' cannot be null."); if (maxThrottlingDelay.isNegative() || maxThrottlingDelay.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxThrottlingDelay' cannot be negative or zero.")); } this.maxThrottlingDelay = maxThrottlingDelay; return this; } /** * Callback hook for when a document indexing action has been added to a batch queued. * * @param onActionAddedConsumer The {@link Consumer} that is called when a document has been added to a batch * queue. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionAdded( Consumer<OnActionAddedOptions<T>> onActionAddedConsumer) { this.onActionAddedConsumer = onActionAddedConsumer; return this; } /** * Sets the callback hook for when a document indexing action has successfully completed indexing. * * @param onActionSucceededConsumer The {@link Consumer} that is called when a document has been successfully * indexing. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionSucceeded( Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer) { this.onActionSucceededConsumer = onActionSucceededConsumer; return this; } /** * Sets the callback hook for when a document indexing action has failed to index and isn't retryable. * * @param onActionErrorConsumer The {@link Consumer} that is called when a document has failed to index and * isn't retryable. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionError( Consumer<OnActionErrorOptions<T>> onActionErrorConsumer) { this.onActionErrorConsumer = onActionErrorConsumer; return this; } /** * Sets the callback hook for when a document indexing has been sent in a batching request. 
* * @param onActionSentConsumer The {@link Consumer} that is called when a document has been sent in a batch * request. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionSent( Consumer<OnActionSentOptions<T>> onActionSentConsumer) { this.onActionSentConsumer = onActionSentConsumer; return this; } /** * Sets the function that retrieves the key value from a document. * * @param documentKeyRetriever Function that retrieves the key from an {@link IndexAction}. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws NullPointerException If {@code documentKeyRetriever} is null. */ public SearchIndexingBufferedSenderBuilder<T> documentKeyRetriever(Function<T, String> documentKeyRetriever) { this.documentKeyRetriever = Objects.requireNonNull(documentKeyRetriever, "'documentKeyRetriever' cannot be null"); return this; } } }
class SearchClientBuilder { private static final boolean DEFAULT_AUTO_FLUSH = true; private static final int DEFAULT_INITIAL_BATCH_ACTION_COUNT = 512; private static final Duration DEFAULT_FLUSH_INTERVAL = Duration.ofSeconds(60); private static final int DEFAULT_MAX_RETRIES_PER_ACTION = 3; private static final Duration DEFAULT_THROTTLING_DELAY = Duration.ofMillis(800); private static final Duration DEFAULT_MAX_THROTTLING_DELAY = Duration.ofMinutes(1); private final ClientLogger logger = new ClientLogger(SearchClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private SearchServiceVersion serviceVersion; private String endpoint; private HttpClient httpClient; private HttpPipeline httpPipeline; private ClientOptions clientOptions; private HttpLogOptions httpLogOptions; private Configuration configuration; private String indexName; private RetryPolicy retryPolicy; private JsonSerializer jsonSerializer; /** * Creates a builder instance that is able to configure and construct {@link SearchClient SearchClients} and {@link * SearchAsyncClient SearchAsyncClients}. */ public SearchClientBuilder() { } /** * Creates a {@link SearchClient} based on options set in the builder. Every time {@code buildClient()} is called a * new instance of {@link SearchClient} is created. * <p> * If {@link * endpoint}, and {@link * builder settings are ignored. * * @return A SearchClient with the options set from the builder. * @throws NullPointerException If {@code indexName} or {@code endpoint} are null. */ public SearchClient buildClient() { return new SearchClient(buildAsyncClient()); } /** * Creates a {@link SearchAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link SearchAsyncClient} is created. 
* <p> * If {@link * endpoint}, and {@link * other builder settings are ignored. * * @return A SearchClient with the options set from the builder. * @throws NullPointerException If {@code indexName} or {@code endpoint} are null. */ public SearchAsyncClient buildAsyncClient() { validateIndexNameAndEndpoint(); SearchServiceVersion buildVersion = (serviceVersion == null) ? SearchServiceVersion.getLatest() : serviceVersion; HttpPipeline pipeline = getHttpPipeline(); return new SearchAsyncClient(endpoint, indexName, buildVersion, pipeline, jsonSerializer, Utility.buildRestClient(endpoint, indexName, pipeline, getDefaultSerializerAdapter())); } /** * Create a new instance of {@link SearchIndexingBufferedSenderBuilder} used to configure {@link * SearchIndexingBufferedSender SearchIndexingBufferedSenders} and {@link SearchIndexingBufferedAsyncSender * SearchIndexingBufferedAsyncSenders}. * * @param documentType The {@link TypeReference} representing the document type associated with the sender. * @param <T> The type of the document that the buffered sender will use. * @return A new instance of {@link SearchIndexingBufferedSenderBuilder}. */ public <T> SearchIndexingBufferedSenderBuilder<T> bufferedSender(TypeReference<T> documentType) { return new SearchIndexingBufferedSenderBuilder<>(); } private void validateIndexNameAndEndpoint() { Objects.requireNonNull(indexName, "'indexName' cannot be null."); Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); } private HttpPipeline getHttpPipeline() { if (httpPipeline != null) { return httpPipeline; } return Utility.buildHttpPipeline(clientOptions, httpLogOptions, configuration, retryPolicy, azureKeyCredential, tokenCredential, perCallPolicies, perRetryPolicies, httpClient, logger); } /** * Sets the service endpoint for the Azure Cognitive Search instance. * * @param endpoint The URL of the Azure Cognitive Search instance. * @return The updated SearchClientBuilder object. 
* @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ public SearchClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. * * @param credential The {@link AzureKeyCredential} used to authenticate HTTP requests. * @return The updated SearchClientBuilder object. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param credential The {@link TokenCredential} used to authenticate HTTP requests. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; return this; } /** * Sets the name of the index. * * @param indexName Name of the index. * @return The updated SearchClientBuilder object. * @throws IllegalArgumentException If {@code indexName} is null or empty. */ public SearchClientBuilder indexName(String indexName) { if (CoreUtils.isNullOrEmpty(indexName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'indexName' cannot be null or empty.")); } this.indexName = indexName; return this; } /** * Sets the logging configuration for HTTP requests and responses. * <p> * If logging configurations aren't provided HTTP requests and responses won't be logged. * * @param logOptions The logging configuration for HTTP requests and responses. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Gets the default Azure Search headers and query parameters allow list. * * @return The default {@link HttpLogOptions} allow list. 
*/ public static HttpLogOptions getDefaultLogOptions() { return Constants.DEFAULT_LOG_OPTIONS_SUPPLIER.get(); } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Adds a pipeline policy to apply to each request sent. * <p> * This method may be called multiple times, each time it is called the policy will be added to the end of added * policy list. All policies will be added after the retry policy. * * @param policy The pipeline policies to added to the policy list. * @return The updated SearchClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public SearchClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Custom JSON serializer that is used to handle model types that are not contained in the Azure Search Documents * library. * * @param jsonSerializer The serializer to serialize user defined models. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder serializer(JsonSerializer jsonSerializer) { this.jsonSerializer = jsonSerializer; return this; } /** * Sets the HTTP client to use for sending requests and receiving responses. * * @param client The HTTP client that will handle sending requests and receiving responses. * @return The updated SearchClientBuilder object. 
*/ public SearchClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store that will be used. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that will attempt to retry requests when needed. * <p> * A default retry policy will be supplied if one isn't provided. * * @param retryPolicy The {@link RetryPolicy} that will attempt to retry requests when needed. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link SearchServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, {@link SearchServiceVersion * the default is used, updating to a newer client library may implicitly use a newer version of the service. 
* * @param serviceVersion The version of the service to be used when making requests. * @return The updated SearchClientBuilder object. */ public SearchClientBuilder serviceVersion(SearchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /** * This class provides a fluent builder API to help aid the configuration and instantiation of {@link * SearchIndexingBufferedSender SearchIndexingBufferedSenders} and {@link SearchIndexingBufferedAsyncSender * SearchIndexingBufferedAsyncSenders}. Call {@link * construct an instance of the desired sender. * * @param <T> The type of the document that the buffered sender will use. * @see SearchIndexingBufferedSender * @see SearchIndexingBufferedAsyncSender */ @ServiceClientBuilder(serviceClients = { SearchIndexingBufferedSender.class, SearchIndexingBufferedAsyncSender.class }) public final class SearchIndexingBufferedSenderBuilder<T> { private final ClientLogger logger = new ClientLogger(SearchIndexingBufferedSenderBuilder.class); private Function<T, String> documentKeyRetriever; private boolean autoFlush = DEFAULT_AUTO_FLUSH; private Duration autoFlushInterval = DEFAULT_FLUSH_INTERVAL; private int initialBatchActionCount = DEFAULT_INITIAL_BATCH_ACTION_COUNT; private int maxRetriesPerAction = DEFAULT_MAX_RETRIES_PER_ACTION; private Duration throttlingDelay = DEFAULT_THROTTLING_DELAY; private Duration maxThrottlingDelay = DEFAULT_MAX_THROTTLING_DELAY; private Consumer<OnActionAddedOptions<T>> onActionAddedConsumer; private Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer; private Consumer<OnActionErrorOptions<T>> onActionErrorConsumer; private Consumer<OnActionSentOptions<T>> onActionSentConsumer; private SearchIndexingBufferedSenderBuilder() { } /** * Creates a {@link SearchIndexingBufferedSender} based on options set in the builder. Every time this is called * a new instance of {@link SearchIndexingBufferedSender} is created. 
* * @return A SearchIndexingBufferedSender with the options set from the builder. * @throws NullPointerException If {@code indexName}, {@code endpoint}, or {@code documentKeyRetriever} are * null. */ public SearchIndexingBufferedSender<T> buildSender() { return new SearchIndexingBufferedSender<>(buildAsyncSender()); } /** * Creates a {@link SearchIndexingBufferedAsyncSender} based on options set in the builder. Every time this is * called a new instance of {@link SearchIndexingBufferedAsyncSender} is created. * * @return A SearchIndexingBufferedAsyncSender with the options set from the builder. * @throws NullPointerException If {@code indexName}, {@code endpoint}, or {@code documentKeyRetriever} are * null. */ public SearchIndexingBufferedAsyncSender<T> buildAsyncSender() { validateIndexNameAndEndpoint(); Objects.requireNonNull(documentKeyRetriever, "'documentKeyRetriever' cannot be null"); return new SearchIndexingBufferedAsyncSender<>(buildRestClient(endpoint, indexName, getHttpPipeline(), getDefaultSerializerAdapter()), jsonSerializer, documentKeyRetriever, autoFlush, autoFlushInterval, initialBatchActionCount, maxRetriesPerAction, throttlingDelay, maxThrottlingDelay, onActionAddedConsumer, onActionSucceededConsumer, onActionErrorConsumer, onActionSentConsumer); } /** * Sets the flag determining whether a buffered sender will automatically flush its document batch based on the * configurations of {@link * * @param autoFlush Flag determining whether a buffered sender will automatically flush. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> autoFlush(boolean autoFlush) { this.autoFlush = autoFlush; return this; } /** * Sets the duration between a buffered sender sending documents to be indexed. 
* <p> * The buffered sender will reset the duration when documents are sent for indexing, either by reaching {@link * * <p> * If {@code autoFlushInterval} is negative or zero and {@link * sender will only flush when {@link * * @param autoFlushInterval Duration between document batches being sent for indexing. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws NullPointerException If {@code autoFlushInterval} is null. */ public SearchIndexingBufferedSenderBuilder<T> autoFlushInterval(Duration autoFlushInterval) { Objects.requireNonNull(autoFlushInterval, "'autoFlushInterval' cannot be null."); this.autoFlushInterval = autoFlushInterval; return this; } /** * Sets the number of documents before a buffered sender will send the batch to be indexed. * <p> * This will only trigger a batch to be sent automatically if {@link * value is {@code 512}. * * @param initialBatchActionCount The number of documents in a batch that will trigger it to be indexed. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code batchSize} is less than one. */ public SearchIndexingBufferedSenderBuilder<T> initialBatchActionCount(int initialBatchActionCount) { if (initialBatchActionCount < 1) { throw logger.logExceptionAsError(new IllegalArgumentException("'batchSize' cannot be less than one.")); } this.initialBatchActionCount = initialBatchActionCount; return this; } /** * Sets the number of times an action will retry indexing before it is considered failed. * <p> * Documents are only retried on retryable status codes. * <p> * Default value is {@code 3}. * * @param maxRetriesPerAction The number of times a document will retry indexing before it is considered * failed. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code maxRetriesPerAction} is less than one. 
*/ public SearchIndexingBufferedSenderBuilder<T> maxRetriesPerAction(int maxRetriesPerAction) { if (maxRetriesPerAction < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxRetries' cannot be less than one.")); } this.maxRetriesPerAction = maxRetriesPerAction; return this; } /** * Sets the initial duration that requests will be delayed when the service is throttling. * <p> * Default value is {@code Duration.ofMillis(800)}. * * @param throttlingDelay The initial duration requests will delay when the service is throttling. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code throttlingDelay.isNegative()} or {@code throttlingDelay.isZero()} * is true. * @throws NullPointerException If {@code throttlingDelay} is null. */ public SearchIndexingBufferedSenderBuilder<T> throttlingDelay(Duration throttlingDelay) { Objects.requireNonNull(throttlingDelay, "'throttlingDelay' cannot be null."); if (throttlingDelay.isNegative() || throttlingDelay.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'throttlingDelay' cannot be negative or zero.")); } this.throttlingDelay = throttlingDelay; return this; } /** * Sets the maximum duration that requests will be delayed when the service is throttling. * <p> * If {@code maxThrottlingDelay} is less than {@link * * <p> * Default value is {@code Duration.ofMinutes(1)}. * * @param maxThrottlingDelay The maximum duration requests will delay when the service is throttling. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws IllegalArgumentException If {@code maxThrottlingDelay.isNegative()} or {@code * maxThrottlingDelay.isZero()} is true. * @throws NullPointerException If {@code maxThrottlingDelay} is null. 
*/ public SearchIndexingBufferedSenderBuilder<T> maxThrottlingDelay(Duration maxThrottlingDelay) { Objects.requireNonNull(maxThrottlingDelay, "'maxThrottlingDelay' cannot be null."); if (maxThrottlingDelay.isNegative() || maxThrottlingDelay.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxThrottlingDelay' cannot be negative or zero.")); } this.maxThrottlingDelay = maxThrottlingDelay; return this; } /** * Callback hook for when a document indexing action has been added to a batch queued. * * @param onActionAddedConsumer The {@link Consumer} that is called when a document has been added to a batch * queue. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionAdded( Consumer<OnActionAddedOptions<T>> onActionAddedConsumer) { this.onActionAddedConsumer = onActionAddedConsumer; return this; } /** * Sets the callback hook for when a document indexing action has successfully completed indexing. * * @param onActionSucceededConsumer The {@link Consumer} that is called when a document has been successfully * indexing. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionSucceeded( Consumer<OnActionSucceededOptions<T>> onActionSucceededConsumer) { this.onActionSucceededConsumer = onActionSucceededConsumer; return this; } /** * Sets the callback hook for when a document indexing action has failed to index and isn't retryable. * * @param onActionErrorConsumer The {@link Consumer} that is called when a document has failed to index and * isn't retryable. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionError( Consumer<OnActionErrorOptions<T>> onActionErrorConsumer) { this.onActionErrorConsumer = onActionErrorConsumer; return this; } /** * Sets the callback hook for when a document indexing has been sent in a batching request. 
* * @param onActionSentConsumer The {@link Consumer} that is called when a document has been sent in a batch * request. * @return The updated SearchIndexingBufferedSenderBuilder object. */ public SearchIndexingBufferedSenderBuilder<T> onActionSent( Consumer<OnActionSentOptions<T>> onActionSentConsumer) { this.onActionSentConsumer = onActionSentConsumer; return this; } /** * Sets the function that retrieves the key value from a document. * * @param documentKeyRetriever Function that retrieves the key from an {@link IndexAction}. * @return The updated SearchIndexingBufferedSenderBuilder object. * @throws NullPointerException If {@code documentKeyRetriever} is null. */ public SearchIndexingBufferedSenderBuilder<T> documentKeyRetriever(Function<T, String> documentKeyRetriever) { this.documentKeyRetriever = Objects.requireNonNull(documentKeyRetriever, "'documentKeyRetriever' cannot be null"); return this; } } }
Same here — allow setting a `null` value here as well, so a previously configured credential can be cleared instead of throwing.
/**
 * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
 *
 * @param credential The {@link AzureKeyCredential} used to authenticate HTTP requests.
 * @return The updated SearchIndexClientBuilder object.
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public SearchIndexClientBuilder credential(AzureKeyCredential credential) {
    // Reject null eagerly at configuration time rather than failing later at build time.
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.azureKeyCredential = credential;
    return this;
}
this.azureKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
/**
 * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
 * <p>
 * Unlike the earlier revision, {@code null} is accepted here: passing {@code null} clears a
 * previously configured key credential (e.g. when switching to token-based authentication).
 *
 * @param credential The {@link AzureKeyCredential} used to authenticate HTTP requests, or
 * {@code null} to clear a previously set credential.
 * @return The updated SearchIndexClientBuilder object.
 */
public SearchIndexClientBuilder credential(AzureKeyCredential credential) { this.azureKeyCredential = credential; return this; }
class SearchIndexClientBuilder { private final ClientLogger logger = new ClientLogger(SearchIndexClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private SearchServiceVersion serviceVersion; private String endpoint; private HttpClient httpClient; private HttpPipeline httpPipeline; private HttpLogOptions httpLogOptions; private ClientOptions clientOptions; private Configuration configuration; private RetryPolicy retryPolicy; private JsonSerializer jsonSerializer; /** * Creates a builder instance that is able to configure and construct {@link SearchIndexClient SearchIndexClients} * and {@link SearchIndexAsyncClient SearchIndexAsyncClients}. */ public SearchIndexClientBuilder() { } /** * Creates a {@link SearchIndexClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link SearchIndexClient} is created. * <p> * If {@link * endpoint} are used to create the {@link SearchIndexClient client}. All other builder settings are ignored. * * @return A SearchIndexClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} are {@code null}. */ public SearchIndexClient buildClient() { return new SearchIndexClient(buildAsyncClient()); } /** * Creates a {@link SearchIndexAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link SearchIndexAsyncClient} is created. * <p> * If {@link * endpoint} are used to create the {@link SearchIndexAsyncClient client}. All other builder settings are ignored. * * @return A SearchIndexAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} are {@code null}. 
*/ public SearchIndexAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); SearchServiceVersion buildVersion = (serviceVersion == null) ? SearchServiceVersion.getLatest() : serviceVersion; if (httpPipeline != null) { return new SearchIndexAsyncClient(endpoint, buildVersion, httpPipeline, jsonSerializer); } HttpPipeline pipeline = Utility.buildHttpPipeline(clientOptions, httpLogOptions, configuration, retryPolicy, azureKeyCredential, tokenCredential, perCallPolicies, perRetryPolicies, httpClient, logger); return new SearchIndexAsyncClient(endpoint, buildVersion, pipeline, jsonSerializer); } /** * Sets the service endpoint for the Azure Cognitive Search instance. * * @param endpoint The URL of the Azure Cognitive Search instance. * @return The updated SearchIndexClientBuilder object. * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ public SearchIndexClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. * * @param credential The {@link AzureKeyCredential} used to authenticate HTTP requests. * @return The updated SearchIndexClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. * @throws IllegalArgumentException If {@link AzureKeyCredential */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param credential The {@link TokenCredential} used to authenticate HTTP requests. * @return The updated SearchIndexClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. 
*/ public SearchIndexClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Sets the logging configuration for HTTP requests and responses. * <p> * If logging configurations aren't provided HTTP requests and responses won't be logged. * * @param logOptions The logging configuration for HTTP requests and responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Gets the default Azure Search headers and query parameters allow list. * * @return The default {@link HttpLogOptions} allow list. */ public static HttpLogOptions getDefaultLogOptions() { return Constants.DEFAULT_LOG_OPTIONS_SUPPLIER.get(); } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Adds a pipeline policy to apply to each request sent. * <p> * This method may be called multiple times, each time it is called the policy will be added to the end of added * policy list. All policies will be added after the retry policy. * * @param policy The pipeline policies to added to the policy list. * @return The updated SearchIndexClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public SearchIndexClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Custom JSON serializer that is used to handle model types that are not contained in the Azure Search Documents * library. * * @param jsonSerializer The serializer to serialize user defined models. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder serializer(JsonSerializer jsonSerializer) { this.jsonSerializer = jsonSerializer; return this; } /** * Sets the HTTP client to use for sending requests and receiving responses. * * @param client The HTTP client that will handle sending requests and receiving responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from {@link * building a {@link SearchIndexClient} or {@link SearchIndexAsyncClient}. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. 
* <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store that will be used. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that will attempt to retry requests when needed. * <p> * A default retry policy will be supplied if one isn't provided. * * @param retryPolicy The {@link RetryPolicy} that will attempt to retry requests when needed. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link SearchServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, {@link SearchServiceVersion * this default is used updating to a newer client library may result in a newer version of the service being used. * * @param serviceVersion The version of the service to be used when making requests. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder serviceVersion(SearchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } }
class SearchIndexClientBuilder { private final ClientLogger logger = new ClientLogger(SearchIndexClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private SearchServiceVersion serviceVersion; private String endpoint; private HttpClient httpClient; private HttpPipeline httpPipeline; private HttpLogOptions httpLogOptions; private ClientOptions clientOptions; private Configuration configuration; private RetryPolicy retryPolicy; private JsonSerializer jsonSerializer; /** * Creates a builder instance that is able to configure and construct {@link SearchIndexClient SearchIndexClients} * and {@link SearchIndexAsyncClient SearchIndexAsyncClients}. */ public SearchIndexClientBuilder() { } /** * Creates a {@link SearchIndexClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link SearchIndexClient} is created. * <p> * If {@link * endpoint} are used to create the {@link SearchIndexClient client}. All other builder settings are ignored. * * @return A SearchIndexClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} are {@code null}. */ public SearchIndexClient buildClient() { return new SearchIndexClient(buildAsyncClient()); } /** * Creates a {@link SearchIndexAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link SearchIndexAsyncClient} is created. * <p> * If {@link * endpoint} are used to create the {@link SearchIndexAsyncClient client}. All other builder settings are ignored. * * @return A SearchIndexAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} are {@code null}. 
*/ public SearchIndexAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); SearchServiceVersion buildVersion = (serviceVersion == null) ? SearchServiceVersion.getLatest() : serviceVersion; if (httpPipeline != null) { return new SearchIndexAsyncClient(endpoint, buildVersion, httpPipeline, jsonSerializer); } HttpPipeline pipeline = Utility.buildHttpPipeline(clientOptions, httpLogOptions, configuration, retryPolicy, azureKeyCredential, tokenCredential, perCallPolicies, perRetryPolicies, httpClient, logger); return new SearchIndexAsyncClient(endpoint, buildVersion, pipeline, jsonSerializer); } /** * Sets the service endpoint for the Azure Cognitive Search instance. * * @param endpoint The URL of the Azure Cognitive Search instance. * @return The updated SearchIndexClientBuilder object. * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ public SearchIndexClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. * * @param credential The {@link AzureKeyCredential} used to authenticate HTTP requests. * @return The updated SearchIndexClientBuilder object. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param credential The {@link TokenCredential} used to authenticate HTTP requests. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * <p> * If logging configurations aren't provided HTTP requests and responses won't be logged. 
* * @param logOptions The logging configuration for HTTP requests and responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Gets the default Azure Search headers and query parameters allow list. * * @return The default {@link HttpLogOptions} allow list. */ public static HttpLogOptions getDefaultLogOptions() { return Constants.DEFAULT_LOG_OPTIONS_SUPPLIER.get(); } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Adds a pipeline policy to apply to each request sent. * <p> * This method may be called multiple times, each time it is called the policy will be added to the end of added * policy list. All policies will be added after the retry policy. * * @param policy The pipeline policies to added to the policy list. * @return The updated SearchIndexClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public SearchIndexClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Custom JSON serializer that is used to handle model types that are not contained in the Azure Search Documents * library. * * @param jsonSerializer The serializer to serialize user defined models. * @return The updated SearchIndexClientBuilder object. 
*/ public SearchIndexClientBuilder serializer(JsonSerializer jsonSerializer) { this.jsonSerializer = jsonSerializer; return this; } /** * Sets the HTTP client to use for sending requests and receiving responses. * * @param client The HTTP client that will handle sending requests and receiving responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from {@link * building a {@link SearchIndexClient} or {@link SearchIndexAsyncClient}. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store that will be used. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that will attempt to retry requests when needed. * <p> * A default retry policy will be supplied if one isn't provided. * * @param retryPolicy The {@link RetryPolicy} that will attempt to retry requests when needed. 
* @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link SearchServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, {@link SearchServiceVersion * this default is used updating to a newer client library may result in a newer version of the service being used. * * @param serviceVersion The version of the service to be used when making requests. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder serviceVersion(SearchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } }
This could be done with Reactor's test framework `StepVerifier` instead of blocking on the `Flux` with `block()`: https://projectreactor.io/docs/test/release/api/reactor/test/StepVerifier.html
/**
 * Live test: downloads the recording metadata document through a redirecting content URL and
 * verifies the payload contains the expected document id.
 *
 * @param httpClient The {@link HttpClient} implementation supplied by the test parameterization.
 */
public void downloadMetadataWithRedirectAsync(HttpClient httpClient) {
    CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient);
    CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadMetadataAsync");
    try {
        // Stream the metadata, then aggregate the emitted buffers into one byte array.
        // NOTE(review): block() on the collected Flux could be replaced with StepVerifier.
        Flux<ByteBuffer> content = conversationAsyncClient.downloadStream(METADATA_URL);
        byte[] contentBytes = FluxUtil.collectBytesInByteBufferStream(content).block();
        assertThat(contentBytes, is(notNullValue()));
        String metadata = new String(contentBytes, StandardCharsets.UTF_8);
        // The downloaded metadata is expected to reference this recording document id.
        assertThat(metadata.contains("0-eus-d2-3cca2175891f21c6c9a5975a12c0141c"), is(true));
    } catch (Exception e) {
        // Log the failure for the live-test run before rethrowing so the test still fails.
        System.out.println("Error: " + e.getMessage());
        throw e;
    }
}
Flux<ByteBuffer> content = conversationAsyncClient.downloadStream(METADATA_URL);
/**
 * Live test: downloads the recording metadata document through a redirecting content URL and
 * delegates content verification to the shared {@code validateMetadata} helper.
 *
 * @param httpClient The {@link HttpClient} implementation supplied by the test parameterization.
 */
public void downloadMetadataWithRedirectAsync(HttpClient httpClient) {
    CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient);
    CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadMetadataAsync");
    try {
        // validateMetadata consumes the stream and asserts on the expected document id.
        validateMetadata(conversationAsyncClient.downloadStream(METADATA_URL));
    } catch (Exception e) {
        // Log the failure for the live-test run before rethrowing so the test still fails.
        System.out.println("Error: " + e.getMessage());
        throw e;
    }
}
class DownloadContentAsyncLiveTests extends CallingServerTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadMetadataAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadMetadataAsync"); try { Flux<ByteBuffer> content = conversationAsyncClient.downloadStream(METADATA_URL); byte[] contentBytes = FluxUtil.collectBytesInByteBufferStream(content).block(); assertThat(contentBytes, is(notNullValue())); String metadata = new String(contentBytes, StandardCharsets.UTF_8); assertThat(metadata.contains("0-eus-d2-3cca2175891f21c6c9a5975a12c0141c"), is(true)); } catch (Exception e) { System.out.println("Error: " + e.getMessage()); throw e; } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadMetadataRetryingAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadMetadataRetryingAsync"); try { Flux<ByteBuffer> content = conversationAsyncClient.downloadStream(METADATA_URL); byte[] contentBytes = FluxUtil.collectBytesInByteBufferStream(content).block(); assertThat(contentBytes, is(notNullValue())); String metadata = new String(contentBytes, StandardCharsets.UTF_8); assertThat(metadata.contains("0-eus-d2-3cca2175891f21c6c9a5975a12c0141c"), is(true)); } catch (Exception e) { System.out.println("Error: " + e.getMessage()); throw e; } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = 
"SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadVideoAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadVideoAsync"); try { Response<Flux<ByteBuffer>> response = conversationAsyncClient.downloadStreamWithResponse(VIDEO_URL, null).block(); assertThat(response, is(notNullValue())); byte[] contentBytes = FluxUtil.collectBytesInByteBufferStream(response.getValue()).block(); assertThat(contentBytes, is(notNullValue())); assertThat(Integer.parseInt(response.getHeaders().getValue("Content-Length")), is(equalTo(contentBytes.length))); } catch (Exception e) { System.out.println("Error: " + e.getMessage()); throw e; } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadToFileAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadToFileAsync"); AsynchronousFileChannel channel = Mockito.mock(AsynchronousFileChannel.class); doAnswer(invocation -> { CompletionHandler<Integer, Object> completionHandler = invocation.getArgument(3); completionHandler.completed(439, null); return null; }).doAnswer(invocation -> { CompletionHandler<Integer, Object> completionHandler = invocation.getArgument(3); completionHandler.completed(438, null); return null; }).when(channel).write(any(ByteBuffer.class), anyLong(), any(), any()); conversationAsyncClient .downloadToWithResponse(METADATA_URL, Paths.get("dummyPath"), channel, new ParallelDownloadOptions().setBlockSize(479L), null).block(); Mockito.verify(channel, times(2)).write(any(ByteBuffer.class), anyLong(), any(), 
any()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadToFileRetryingAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadToFileRetryingAsync"); AsynchronousFileChannel channel = Mockito.mock(AsynchronousFileChannel.class); doAnswer(invocation -> { ByteBuffer stream = invocation.getArgument(0); String metadata = new String(stream.array(), StandardCharsets.UTF_8); assertTrue(metadata.contains("0-eus-d2-3cca2175891f21c6c9a5975a12c0141c")); CompletionHandler<Integer, Object> completionHandler = invocation.getArgument(3); completionHandler.completed(957, null); return null; }).when(channel).write(any(ByteBuffer.class), anyLong(), any(), any()); conversationAsyncClient .downloadToWithResponse(METADATA_URL, Paths.get("dummyPath"), channel, null, null).block(); Mockito.verify(channel).write(any(ByteBuffer.class), anyLong(), any(), any()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadContent404Async(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadContent404Async"); Response<Flux<ByteBuffer>> response = conversationAsyncClient .downloadStreamWithResponse(CONTENT_URL_404, null).block(); assertThat(response, is(notNullValue())); assertThat(response.getStatusCode(), is(equalTo(404))); assertThrows(CallingServerErrorException.class, () -> FluxUtil.collectBytesInByteBufferStream(response.getValue()).block()); } @ParameterizedTest 
@MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") private CallingServerAsyncClient setupAsyncClient(CallingServerClientBuilder builder, String testName) { return addLoggingPolicy(builder, testName).buildAsyncClient(); } protected CallingServerClientBuilder addLoggingPolicy(CallingServerClientBuilder builder, String testName) { return builder.addPolicy((context, next) -> logHeaders(testName, next)); } }
class DownloadContentAsyncLiveTests extends CallingServerTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadMetadataAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadMetadataAsync"); try { validateMetadata(conversationAsyncClient.downloadStream(METADATA_URL)); } catch (Exception e) { System.out.println("Error: " + e.getMessage()); throw e; } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadMetadataRetryingAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadMetadataRetryingAsync"); try { validateMetadata(conversationAsyncClient.downloadStream(METADATA_URL)); } catch (Exception e) { System.out.println("Error: " + e.getMessage()); throw e; } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadVideoAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadVideoAsync"); try { StepVerifier.create(conversationAsyncClient.downloadStreamWithResponse(VIDEO_URL, null)) .consumeNextWith(response -> { StepVerifier.create(response.getValue()) .consumeNextWith(byteBuffer -> { 
assertThat(Integer.parseInt(response.getHeaders().getValue("Content-Length")), is(equalTo(byteBuffer.array().length))); }) .verifyComplete(); }) .verifyComplete(); } catch (Exception e) { System.out.println("Error: " + e.getMessage()); throw e; } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadToFileAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadToFileAsync"); AsynchronousFileChannel channel = Mockito.mock(AsynchronousFileChannel.class); doAnswer(invocation -> { CompletionHandler<Integer, Object> completionHandler = invocation.getArgument(3); completionHandler.completed(439, null); return null; }).doAnswer(invocation -> { CompletionHandler<Integer, Object> completionHandler = invocation.getArgument(3); completionHandler.completed(438, null); return null; }).when(channel).write(any(ByteBuffer.class), anyLong(), any(), any()); conversationAsyncClient .downloadToWithResponse(METADATA_URL, Paths.get("dummyPath"), channel, new ParallelDownloadOptions().setBlockSize(479L), null).block(); Mockito.verify(channel, times(2)).write(any(ByteBuffer.class), anyLong(), any(), any()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadToFileRetryingAsync(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadToFileRetryingAsync"); AsynchronousFileChannel channel = Mockito.mock(AsynchronousFileChannel.class); doAnswer(invocation -> { ByteBuffer stream 
= invocation.getArgument(0); String metadata = new String(stream.array(), StandardCharsets.UTF_8); assertTrue(metadata.contains("0-eus-d2-3cca2175891f21c6c9a5975a12c0141c")); CompletionHandler<Integer, Object> completionHandler = invocation.getArgument(3); completionHandler.completed(957, null); return null; }).when(channel).write(any(ByteBuffer.class), anyLong(), any(), any()); conversationAsyncClient .downloadToWithResponse(METADATA_URL, Paths.get("dummyPath"), channel, null, null).block(); Mockito.verify(channel).write(any(ByteBuffer.class), anyLong(), any(), any()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void downloadContent404Async(HttpClient httpClient) { CallingServerClientBuilder builder = getConversationClientUsingConnectionString(httpClient); CallingServerAsyncClient conversationAsyncClient = setupAsyncClient(builder, "downloadContent404Async"); StepVerifier.create(conversationAsyncClient.downloadStreamWithResponse(CONTENT_URL_404, null)) .consumeNextWith(response -> { assertThat(response.getStatusCode(), is(equalTo(404))); StepVerifier.create(response.getValue()).verifyError(CallingServerErrorException.class); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") private CallingServerAsyncClient setupAsyncClient(CallingServerClientBuilder builder, String testName) { return addLoggingPolicy(builder, testName).buildAsyncClient(); } protected CallingServerClientBuilder addLoggingPolicy(CallingServerClientBuilder builder, String testName) { return builder.addPolicy((context, next) -> logHeaders(testName, next)); } private void validateMetadata(Flux<ByteBuffer> metadataByteBuffer) { StepVerifier.create(metadataByteBuffer) 
.consumeNextWith(byteBuffer -> { String metadata = new String(byteBuffer.array(), StandardCharsets.UTF_8); assertThat(metadata.contains("0-eus-d2-3cca2175891f21c6c9a5975a12c0141c"), is(true)); }) .verifyComplete(); } }
please factor out this code outside of the `AsyncCtlWorkload.java` to be used by all benchmark components.
public AsyncCtlWorkload(Configuration cfg) { String preferredRegionsList = cfg.getPreferredRegionsList(); List<String> preferredRegions = null; if (StringUtils.isNotEmpty(preferredRegionsList)) { String[] preferredArray = preferredRegionsList.split(","); if (preferredArray != null && preferredArray.length > 0) { preferredRegions = new ArrayList<>(Arrays.asList(preferredArray)); } } CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .preferredRegions(preferredRegions) .consistencyLevel(cfg.getConsistencyLevel()) .clientTelemetryEnabled(true) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, 
TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } reporter = ScheduledReporterFactory.create(cfg, metricsRegistry); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); }
}
public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .preferredRegions(cfg.getPreferredRegionsList()) .consistencyLevel(cfg.getConsistencyLevel()) .clientTelemetryEnabled(true) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } reporter = ScheduledReporterFactory.create(cfg, metricsRegistry); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = 
configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created 
for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created 
for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } }
What about other status codes like 301 (moved permanently), 307 (temporary redirect) and 308 (permanent redirect)? Also, instead of using magic numbers, use named constants.
private boolean shouldRedirect(HttpResponse response, int redirectNumber, Set<String> locations) { return response.getStatusCode() == 302 && !locations.contains(response.getHeaderValue(LOCATION_HEADER_NAME)) && redirectNumber < MAX_REDIRECTS; }
return response.getStatusCode() == 302
private boolean shouldRedirect(HttpResponse response, int redirectNumber, Set<String> locations) { return response.getStatusCode() == 302 && !locations.contains(response.getHeaderValue(LOCATION_HEADER_NAME)) && redirectNumber < MAX_REDIRECTS; }
class RedirectPolicy implements HttpPipelinePolicy { private static final int MAX_REDIRECTS = 10; private static final String LOCATION_HEADER_NAME = "Location"; @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptRedirection(context, next, 0, new HashSet<>()); } private Mono<HttpResponse> attemptRedirection(HttpPipelineCallContext context, HttpPipelineNextPolicy next, int redirectNumber, Set<String> locations) { return next.clone().process().flatMap(httpResponse -> { if (shouldRedirect(httpResponse, redirectNumber, locations)) { String newLocation = httpResponse.getHeaderValue(LOCATION_HEADER_NAME); locations.add(newLocation); HttpRequest newRequest = context.getHttpRequest().copy(); newRequest.setUrl(newLocation); context.setHttpRequest(newRequest); return attemptRedirection(context, next, redirectNumber + 1, locations); } return Mono.just(httpResponse); }); } }
class RedirectPolicy implements HttpPipelinePolicy { private static final int MAX_REDIRECTS = 10; private static final String LOCATION_HEADER_NAME = "Location"; @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptRedirection(context, next, 0, new HashSet<>()); } private Mono<HttpResponse> attemptRedirection(HttpPipelineCallContext context, HttpPipelineNextPolicy next, int redirectNumber, Set<String> locations) { return next.clone().process().flatMap(httpResponse -> { if (shouldRedirect(httpResponse, redirectNumber, locations)) { String newLocation = httpResponse.getHeaderValue(LOCATION_HEADER_NAME); locations.add(newLocation); HttpRequest newRequest = context.getHttpRequest().copy(); newRequest.setUrl(newLocation); context.setHttpRequest(newRequest); return attemptRedirection(context, next, redirectNumber + 1, locations); } return Mono.just(httpResponse); }); } }
Should this be made into a separate test that has an assumption of `TestMode != PLAYBACK` #Resolved
public void authenticationScopeTest(HttpClient httpClient) { String authority = getAuthority(REGISTRY_ENDPOINT); if (authority == AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) { ContainerRegistryClient registryClient = getContainerRegistryBuilder(httpClient) .authenticationScope(AZURE_GLOBAL_AUTHENTICATION_SCOPE) .buildClient(); List<String> repositories = registryClient.listRepositoryNames().stream().collect(Collectors.toList()); validateRepositories(repositories); if (getTestMode() != TestMode.PLAYBACK) { ContainerRegistryClient throwableRegistryClient = getContainerRegistryBuilder(httpClient) .authenticationScope(AZURE_GOV_AUTHENTICATION_SCOPE) .buildClient(); assertThrows(ClientAuthenticationException.class, () -> throwableRegistryClient.listRepositoryNames().stream().collect(Collectors.toList())); } } }
}
public void authenticationScopeTest(HttpClient httpClient) { Assumptions.assumeFalse(getTestMode().equals(TestMode.PLAYBACK)); Assumptions.assumeFalse(REGISTRY_ENDPOINT == null); Assumptions.assumeTrue(getAuthority(REGISTRY_ENDPOINT).equals(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)); ContainerRegistryClient registryClient = getContainerRegistryBuilder(httpClient) .authenticationScope(AZURE_GLOBAL_AUTHENTICATION_SCOPE) .buildClient(); List<String> repositories = registryClient.listRepositoryNames().stream().collect(Collectors.toList()); validateRepositories(repositories); ContainerRegistryClient throwableRegistryClient = getContainerRegistryBuilder(httpClient) .authenticationScope(AZURE_GOV_AUTHENTICATION_SCOPE) .buildClient(); assertThrows(ClientAuthenticationException.class, () -> throwableRegistryClient.listRepositoryNames().stream().collect(Collectors.toList())); }
class ContainerRegistryClientIntegrationTests extends ContainerRegistryClientsTestBase { private ContainerRegistryAsyncClient registryAsyncClient; private ContainerRegistryClient registryClient; private ContainerRegistryAsyncClient getContainerRegistryAsyncClient(HttpClient httpClient) { return getContainerRegistryBuilder(httpClient).buildAsyncClient(); } private ContainerRegistryClient getContainerRegistryClient(HttpClient httpClient) { return getContainerRegistryBuilder(httpClient).buildClient(); } @BeforeEach void beforeEach() { TestUtils.importImage(getTestMode(), HELLO_WORLD_REPOSITORY_NAME, Arrays.asList("latest", "v1", "v2", "v3", "v4")); TestUtils.importImage( getTestMode(), ALPINE_REPOSITORY_NAME, Arrays.asList( LATEST_TAG_NAME, V1_TAG_NAME, V2_TAG_NAME, V3_TAG_NAME, V4_TAG_NAME)); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void listRepositoryNames(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); StepVerifier.create(registryAsyncClient.listRepositoryNames()) .recordWith(ArrayList::new) .thenConsumeWhile(x -> true) .expectRecordedMatches(this::validateRepositories) .verifyComplete(); List<String> repositories = registryClient.listRepositoryNames().stream().collect(Collectors.toList()); validateRepositories(repositories); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void listRepositoryNamesWithPageSize(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); StepVerifier.create(registryAsyncClient.listRepositoryNames().byPage(PAGESIZE_1)) .recordWith(ArrayList::new) .thenConsumeWhile(x -> true) .expectRecordedMatches(this::validateRepositoriesByPage) .verifyComplete(); ArrayList<String> repositories = new ArrayList<>(); 
registryClient.listRepositoryNames().iterableByPage(PAGESIZE_1) .forEach(res -> repositories.addAll(res.getValue())); validateRepositories(repositories); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void listRepositoryNamesWithInvalidPageSize(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); ArrayList<String> repositories = new ArrayList<>(); assertThrows(IllegalArgumentException.class, () -> registryClient.listRepositoryNames().iterableByPage(-1) .forEach(res -> repositories.addAll(res.getValue()))); StepVerifier.create(registryAsyncClient.listRepositoryNames().byPage(-1)) .verifyError(IllegalArgumentException.class); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void deleteRepositoryWithResponseThrows(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); StepVerifier.create(registryAsyncClient.deleteRepositoryWithResponse(null)) .verifyError(NullPointerException.class); assertThrows(NullPointerException.class, () -> registryClient.deleteRepository(null)); assertThrows(NullPointerException.class, () -> registryClient.deleteRepositoryWithResponse(null, Context.NONE)); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void getContainerRepository(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); ContainerRepositoryAsync repositoryAsync = registryAsyncClient.getRepository(HELLO_WORLD_REPOSITORY_NAME); assertNotNull(repositoryAsync); StepVerifier.create(repositoryAsync.getProperties()) .assertNext(this::validateProperties) .verifyComplete(); ContainerRepository repository = registryClient.getRepository(HELLO_WORLD_REPOSITORY_NAME); 
assertNotNull(repository); validateProperties(repository.getProperties()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void getArtifactRegistry(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); RegistryArtifactAsync registryArtifactAsync = registryAsyncClient.getArtifact(HELLO_WORLD_REPOSITORY_NAME, LATEST_TAG_NAME); assertNotNull(registryArtifactAsync); StepVerifier.create(registryArtifactAsync.getManifestProperties()) .assertNext(res -> validateManifestProperties(res, true, false)) .verifyComplete(); RegistryArtifact registryArtifact = registryClient.getArtifact(HELLO_WORLD_REPOSITORY_NAME, LATEST_TAG_NAME); assertNotNull(registryArtifact); validateManifestProperties(registryArtifact.getManifestProperties(), true, false); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void convenienceProperties(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); String registryEndpoint = REGISTRY_ENDPOINT; if (getTestMode() == TestMode.PLAYBACK) { registryEndpoint = REGISTRY_ENDPOINT_PLAYBACK; } assertEquals(registryEndpoint, registryAsyncClient.getEndpoint()); assertEquals(registryEndpoint, registryClient.getEndpoint()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") }
class ContainerRegistryClientIntegrationTests extends ContainerRegistryClientsTestBase { private ContainerRegistryAsyncClient registryAsyncClient; private ContainerRegistryClient registryClient; private ContainerRegistryAsyncClient getContainerRegistryAsyncClient(HttpClient httpClient) { return getContainerRegistryBuilder(httpClient).buildAsyncClient(); } private ContainerRegistryClient getContainerRegistryClient(HttpClient httpClient) { return getContainerRegistryBuilder(httpClient).buildClient(); } @BeforeEach void beforeEach() { TestUtils.importImage(getTestMode(), HELLO_WORLD_REPOSITORY_NAME, Arrays.asList("latest", "v1", "v2", "v3", "v4")); TestUtils.importImage( getTestMode(), ALPINE_REPOSITORY_NAME, Arrays.asList( LATEST_TAG_NAME, V1_TAG_NAME, V2_TAG_NAME, V3_TAG_NAME, V4_TAG_NAME)); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void listRepositoryNames(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); StepVerifier.create(registryAsyncClient.listRepositoryNames()) .recordWith(ArrayList::new) .thenConsumeWhile(x -> true) .expectRecordedMatches(this::validateRepositories) .verifyComplete(); List<String> repositories = registryClient.listRepositoryNames().stream().collect(Collectors.toList()); validateRepositories(repositories); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void listRepositoryNamesWithPageSize(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); StepVerifier.create(registryAsyncClient.listRepositoryNames().byPage(PAGESIZE_1)) .recordWith(ArrayList::new) .thenConsumeWhile(x -> true) .expectRecordedMatches(this::validateRepositoriesByPage) .verifyComplete(); ArrayList<String> repositories = new ArrayList<>(); 
registryClient.listRepositoryNames().iterableByPage(PAGESIZE_1) .forEach(res -> repositories.addAll(res.getValue())); validateRepositories(repositories); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void listRepositoryNamesWithInvalidPageSize(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); ArrayList<String> repositories = new ArrayList<>(); assertThrows(IllegalArgumentException.class, () -> registryClient.listRepositoryNames().iterableByPage(-1) .forEach(res -> repositories.addAll(res.getValue()))); StepVerifier.create(registryAsyncClient.listRepositoryNames().byPage(-1)) .verifyError(IllegalArgumentException.class); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void deleteRepositoryWithResponseThrows(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); StepVerifier.create(registryAsyncClient.deleteRepositoryWithResponse(null)) .verifyError(NullPointerException.class); assertThrows(NullPointerException.class, () -> registryClient.deleteRepository(null)); assertThrows(NullPointerException.class, () -> registryClient.deleteRepositoryWithResponse(null, Context.NONE)); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void getContainerRepository(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); ContainerRepositoryAsync repositoryAsync = registryAsyncClient.getRepository(HELLO_WORLD_REPOSITORY_NAME); assertNotNull(repositoryAsync); StepVerifier.create(repositoryAsync.getProperties()) .assertNext(this::validateProperties) .verifyComplete(); ContainerRepository repository = registryClient.getRepository(HELLO_WORLD_REPOSITORY_NAME); 
assertNotNull(repository); validateProperties(repository.getProperties()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void getArtifactRegistry(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); RegistryArtifactAsync registryArtifactAsync = registryAsyncClient.getArtifact(HELLO_WORLD_REPOSITORY_NAME, LATEST_TAG_NAME); assertNotNull(registryArtifactAsync); StepVerifier.create(registryArtifactAsync.getManifestProperties()) .assertNext(res -> validateManifestProperties(res, true, false)) .verifyComplete(); RegistryArtifact registryArtifact = registryClient.getArtifact(HELLO_WORLD_REPOSITORY_NAME, LATEST_TAG_NAME); assertNotNull(registryArtifact); validateManifestProperties(registryArtifact.getManifestProperties(), true, false); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") public void convenienceProperties(HttpClient httpClient) { registryAsyncClient = getContainerRegistryAsyncClient(httpClient); registryClient = getContainerRegistryClient(httpClient); String registryEndpoint = REGISTRY_ENDPOINT; if (getTestMode() == TestMode.PLAYBACK) { registryEndpoint = REGISTRY_ENDPOINT_PLAYBACK; } assertEquals(registryEndpoint, registryAsyncClient.getEndpoint()); assertEquals(registryEndpoint, registryClient.getEndpoint()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("getHttpClients") }
This should be made into a JUnit assumption (e.g. `Assumptions.assumeTrue`) so the test is reported as skipped rather than silently passing. #Resolved
/**
 * Verifies that an anonymous (credential-free) client can list repository names on a
 * registry that permits anonymous pull access.
 *
 * @param httpClient the HTTP client implementation under test
 */
public void listAnonymousRepositories(HttpClient httpClient) {
    // FIX: compare the authority with equals(); '==' is reference identity on String and can
    // silently skip the whole test body. Constant-first equals() also tolerates a null authority.
    if (AzureAuthorityHosts.AZURE_PUBLIC_CLOUD.equals(TestUtils.getAuthority(ANONYMOUS_REGISTRY_ENDPOINT))) {
        // 'null' credential => anonymous access.
        ContainerRegistryClient client =
            getContainerRegistryBuilder(httpClient, null, ANONYMOUS_REGISTRY_ENDPOINT).buildClient();
        List<String> repositories = client.listRepositoryNames().stream().collect(Collectors.toList());
        assertTrue(repositories.stream().anyMatch(HELLO_WORLD_REPOSITORY_NAME::equals));
    }
}
if (TestUtils.getAuthority(ANONYMOUS_REGISTRY_ENDPOINT) == AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) {
/**
 * Verifies that an anonymous (credential-free) client can list repository names on a
 * registry that permits anonymous pull access. Public-cloud environments only; otherwise
 * the test is skipped via assumptions.
 *
 * @param httpClient the HTTP client implementation under test
 */
public void listAnonymousRepositories(HttpClient httpClient) {
    // Skip (not fail) when the anonymous endpoint is unset or not in the public cloud.
    Assumptions.assumeFalse(ANONYMOUS_REGISTRY_ENDPOINT == null);
    Assumptions.assumeTrue(getAuthority(ANONYMOUS_REGISTRY_ENDPOINT).equals(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD));
    // 'null' credential => anonymous access.
    ContainerRegistryClient client = getContainerRegistryBuilder(httpClient, null, ANONYMOUS_REGISTRY_ENDPOINT).buildClient();
    List<String> repositories = client.listRepositoryNames().stream().collect(Collectors.toList());
    assertTrue(repositories.stream().anyMatch(HELLO_WORLD_REPOSITORY_NAME::equals));
}
/**
 * Tests anonymous-access scenarios against the anonymous registry.
 * Seeds the registry with the hello-world image (multiple tags) before each test.
 * NOTE(review): the method carrying the trailing annotations is not visible in this chunk.
 */
class ContainerRepositoryAnonymousAccessTests extends ContainerRegistryClientsTestBase {
    @BeforeEach
    void beforeEach() {
        // NOTE(review): sibling tests call the inherited getTestMode() directly; consider dropping
        // the ImplUtils indirection for consistency — confirm both resolve the same test mode.
        TestUtils.importImageAsync(ImplUtils.getTestMode(), ANONYMOUS_REGISTRY_NAME, HELLO_WORLD_REPOSITORY_NAME,
            Arrays.asList("latest", "v1", "v2", "v3", "v4"), ANONYMOUS_REGISTRY_ENDPOINT).block();
    }

    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("getHttpClients")
}
/**
 * Tests anonymous-access scenarios against the anonymous registry.
 * Seeds the registry with the hello-world image (multiple tags) before each test.
 * NOTE(review): the method carrying the trailing annotations is not visible in this chunk.
 */
class ContainerRepositoryAnonymousAccessTests extends ContainerRegistryClientsTestBase {
    @BeforeEach
    void beforeEach() {
        // Uses the inherited getTestMode() so playback/record/live behavior matches sibling tests.
        TestUtils.importImageAsync(getTestMode(), ANONYMOUS_REGISTRY_NAME, HELLO_WORLD_REPOSITORY_NAME,
            Arrays.asList("latest", "v1", "v2", "v3", "v4"), ANONYMOUS_REGISTRY_ENDPOINT).block();
    }

    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("getHttpClients")
}
[TIL] Thanks!
/**
 * Verifies that an anonymous (credential-free) client can list repository names on a
 * registry that permits anonymous pull access.
 *
 * @param httpClient the HTTP client implementation under test
 */
public void listAnonymousRepositories(HttpClient httpClient) {
    // FIX: compare the authority with equals(); '==' is reference identity on String and can
    // silently skip the whole test body. Constant-first equals() also tolerates a null authority.
    if (AzureAuthorityHosts.AZURE_PUBLIC_CLOUD.equals(TestUtils.getAuthority(ANONYMOUS_REGISTRY_ENDPOINT))) {
        // 'null' credential => anonymous access.
        ContainerRegistryClient client =
            getContainerRegistryBuilder(httpClient, null, ANONYMOUS_REGISTRY_ENDPOINT).buildClient();
        List<String> repositories = client.listRepositoryNames().stream().collect(Collectors.toList());
        assertTrue(repositories.stream().anyMatch(HELLO_WORLD_REPOSITORY_NAME::equals));
    }
}
if (TestUtils.getAuthority(ANONYMOUS_REGISTRY_ENDPOINT) == AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) {
/**
 * Verifies that an anonymous (credential-free) client can list repository names on a
 * registry that permits anonymous pull access. Public-cloud environments only; otherwise
 * the test is skipped via assumptions.
 *
 * @param httpClient the HTTP client implementation under test
 */
public void listAnonymousRepositories(HttpClient httpClient) {
    // Skip (not fail) when the anonymous endpoint is unset or not in the public cloud.
    Assumptions.assumeFalse(ANONYMOUS_REGISTRY_ENDPOINT == null);
    Assumptions.assumeTrue(getAuthority(ANONYMOUS_REGISTRY_ENDPOINT).equals(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD));
    // 'null' credential => anonymous access.
    ContainerRegistryClient client = getContainerRegistryBuilder(httpClient, null, ANONYMOUS_REGISTRY_ENDPOINT).buildClient();
    List<String> repositories = client.listRepositoryNames().stream().collect(Collectors.toList());
    assertTrue(repositories.stream().anyMatch(HELLO_WORLD_REPOSITORY_NAME::equals));
}
/**
 * Tests anonymous-access scenarios against the anonymous registry.
 * Seeds the registry with the hello-world image (multiple tags) before each test.
 * NOTE(review): the method carrying the trailing annotations is not visible in this chunk.
 */
class ContainerRepositoryAnonymousAccessTests extends ContainerRegistryClientsTestBase {
    @BeforeEach
    void beforeEach() {
        // NOTE(review): sibling tests call the inherited getTestMode() directly; consider dropping
        // the ImplUtils indirection for consistency — confirm both resolve the same test mode.
        TestUtils.importImageAsync(ImplUtils.getTestMode(), ANONYMOUS_REGISTRY_NAME, HELLO_WORLD_REPOSITORY_NAME,
            Arrays.asList("latest", "v1", "v2", "v3", "v4"), ANONYMOUS_REGISTRY_ENDPOINT).block();
    }

    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("getHttpClients")
}
/**
 * Tests anonymous-access scenarios against the anonymous registry.
 * Seeds the registry with the hello-world image (multiple tags) before each test.
 * NOTE(review): the method carrying the trailing annotations is not visible in this chunk.
 */
class ContainerRepositoryAnonymousAccessTests extends ContainerRegistryClientsTestBase {
    @BeforeEach
    void beforeEach() {
        // Uses the inherited getTestMode() so playback/record/live behavior matches sibling tests.
        TestUtils.importImageAsync(getTestMode(), ANONYMOUS_REGISTRY_NAME, HELLO_WORLD_REPOSITORY_NAME,
            Arrays.asList("latest", "v1", "v2", "v3", "v4"), ANONYMOUS_REGISTRY_ENDPOINT).block();
    }

    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("getHttpClients")
}
It would be good to add a log here to specify where the proxy was loaded from. Since this should only happen once per client creation, we can log at info level.
private static ProxyOptions attemptToLoadProxy(Configuration configuration, boolean createUnresolved) { ProxyOptions proxyOptions; if (Boolean.parseBoolean(configuration.get(JAVA_SYSTEM_PROXY_PREREQUISITE))) { proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTPS_PROXY); if (proxyOptions != null) { return proxyOptions; } proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTP_PROXY); if (proxyOptions != null) { return proxyOptions; } } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTPS); if (proxyOptions != null) { return proxyOptions; } return attemptToLoadJavaProxy(configuration, createUnresolved, HTTP); }
}
/*
 * Attempts to load a proxy, first from the system proxy environment variables (only when
 * permitted by 'java.net.useSystemProxies') and then from the JVM proxy system properties,
 * preferring HTTPS configuration over HTTP in both cases. Logs which source supplied the
 * proxy; this runs once per client creation, so the logging cost is negligible.
 *
 * Returns null when no proxy configuration is found.
 */
private static ProxyOptions attemptToLoadProxy(Configuration configuration, boolean createUnresolved) {
    ProxyOptions proxyOptions;
    // Environment-variable proxies are only honored when the JVM opts in to system proxies.
    if (Boolean.parseBoolean(configuration.get(JAVA_SYSTEM_PROXY_PREREQUISITE))) {
        proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTPS_PROXY);
        if (proxyOptions != null) {
            LOGGER.verbose("Using proxy created from HTTPS_PROXY environment variable.");
            return proxyOptions;
        }
        proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTP_PROXY);
        if (proxyOptions != null) {
            LOGGER.verbose("Using proxy created from HTTP_PROXY environment variable.");
            return proxyOptions;
        }
    }
    // Fall back to the JVM's https.proxy*/http.proxy* system properties.
    proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTPS);
    if (proxyOptions != null) {
        LOGGER.verbose("Using proxy created from JVM HTTPS system properties.");
        return proxyOptions;
    }
    proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTP);
    if (proxyOptions != null) {
        LOGGER.verbose("Using proxy created from JVM HTTP system properties.");
        return proxyOptions;
    }
    return null;
}
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether system proxy configurations (HTTPS_PROXY, HTTP_PROXY) are allowed to be used. */ private static final String JAVA_SYSTEM_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. * * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. 
* * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded the proxy address is DNS resolved. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * * <p> * {@code null} will be returned if no proxy was found in the environment. 
* * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { return fromConfiguration(configuration, false); } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded, the proxy address is DNS resolved based on {@code createUnresolved}. When {@code * createUnresolved} is true resolving {@link * calls. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @param createUnresolved Flag determining whether the returned {@link ProxyOptions} is unresolved. * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. 
* @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration, boolean createUnresolved) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; return attemptToLoadProxy(proxyConfiguration, createUnresolved); } private static ProxyOptions attemptToLoadSystemProxy(Configuration configuration, boolean createUnresolved, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort(); InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(proxyUrl.getHost(), port) : new InetSocketAddress(proxyUrl.getHost(), port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. 
*/ private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ String[] nonProxyHosts = noProxyString.split(","); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'NO_PROXY' doesn't have a strongly standardized format, for now we are going to support values beginning * and ending with '*' or '.' to exclude an entire domain and will quote the value between the prefix and * suffix. In the future this may need to be updated to support more complex scenarios required by * 'NO_PROXY' users such as wild cards within the domain exclusion. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; /* * First check if the non-proxy host begins with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.startsWith(".*")) { prefixWildcard = ".*"; body = body.substring(2); } else if (body.startsWith("*") || body.startsWith(".")) { prefixWildcard = ".*"; body = body.substring(1); } /* * First check if the non-proxy host ends with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.endsWith(".*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 2); } else if (body.endsWith("*") || body.endsWith(".")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. 
*/ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, boolean createUnresolved, String type) { String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(host, port) : new InetSocketAddress(host, port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.setNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." + JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ String[] nonProxyHosts = nonProxyHostsString.split("\\|"); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'http.nonProxyHosts' values are allowed to begin and end with '*' but this is an invalid value for a * pattern, so we need to qualify the quantifier with the match all '.' character. 
*/ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether system proxy configurations (HTTPS_PROXY, HTTP_PROXY) are allowed to be used. */ private static final String JAVA_SYSTEM_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. * * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. 
* * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded the proxy address is DNS resolved. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * * <p> * {@code null} will be returned if no proxy was found in the environment. 
* * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { return fromConfiguration(configuration, false); } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded, the proxy address is DNS resolved based on {@code createUnresolved}. When {@code * createUnresolved} is true resolving {@link * calls. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @param createUnresolved Flag determining whether the returned {@link ProxyOptions} is unresolved. * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. 
* @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration, boolean createUnresolved) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; return attemptToLoadProxy(proxyConfiguration, createUnresolved); } private static ProxyOptions attemptToLoadSystemProxy(Configuration configuration, boolean createUnresolved, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort(); InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(proxyUrl.getHost(), port) : new InetSocketAddress(proxyUrl.getHost(), port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. 
*/ private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ String[] nonProxyHosts = noProxyString.split(","); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'NO_PROXY' doesn't have a strongly standardized format, for now we are going to support values beginning * and ending with '*' or '.' to exclude an entire domain and will quote the value between the prefix and * suffix. In the future this may need to be updated to support more complex scenarios required by * 'NO_PROXY' users such as wild cards within the domain exclusion. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; /* * First check if the non-proxy host begins with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.startsWith(".*")) { prefixWildcard = ".*"; body = body.substring(2); } else if (body.startsWith("*") || body.startsWith(".")) { prefixWildcard = ".*"; body = body.substring(1); } /* * First check if the non-proxy host ends with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.endsWith(".*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 2); } else if (body.endsWith("*") || body.endsWith(".")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. 
*/ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, boolean createUnresolved, String type) { String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(host, port) : new InetSocketAddress(host, port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.setNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." + JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ String[] nonProxyHosts = nonProxyHostsString.split("\\|"); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'http.nonProxyHosts' values are allowed to begin and end with '*' but this is an invalid value for a * pattern, so we need to qualify the quantifier with the match all '.' character. 
*/ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
Added logging. I used the verbose log level because this is a debugging aid (identifying which source supplied the proxy) rather than an operational event worth surfacing at a higher level.
private static ProxyOptions attemptToLoadProxy(Configuration configuration, boolean createUnresolved) { ProxyOptions proxyOptions; if (Boolean.parseBoolean(configuration.get(JAVA_SYSTEM_PROXY_PREREQUISITE))) { proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTPS_PROXY); if (proxyOptions != null) { return proxyOptions; } proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTP_PROXY); if (proxyOptions != null) { return proxyOptions; } } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTPS); if (proxyOptions != null) { return proxyOptions; } return attemptToLoadJavaProxy(configuration, createUnresolved, HTTP); }
}
private static ProxyOptions attemptToLoadProxy(Configuration configuration, boolean createUnresolved) { ProxyOptions proxyOptions; if (Boolean.parseBoolean(configuration.get(JAVA_SYSTEM_PROXY_PREREQUISITE))) { proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTPS_PROXY); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from HTTPS_PROXY environment variable."); return proxyOptions; } proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTP_PROXY); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from HTTP_PROXY environment variable."); return proxyOptions; } } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTPS); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from JVM HTTPS system properties."); return proxyOptions; } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTP); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from JVM HTTP system properties."); return proxyOptions; } return null; }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether system proxy configurations (HTTPS_PROXY, HTTP_PROXY) are allowed to be used. */ private static final String JAVA_SYSTEM_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. * * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. 
* * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded the proxy address is DNS resolved. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * * <p> * {@code null} will be returned if no proxy was found in the environment. 
* * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { return fromConfiguration(configuration, false); } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded, the proxy address is DNS resolved based on {@code createUnresolved}. When {@code * createUnresolved} is true resolving {@link * calls. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @param createUnresolved Flag determining whether the returned {@link ProxyOptions} is unresolved. * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. 
* @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration, boolean createUnresolved) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; return attemptToLoadProxy(proxyConfiguration, createUnresolved); } private static ProxyOptions attemptToLoadSystemProxy(Configuration configuration, boolean createUnresolved, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort(); InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(proxyUrl.getHost(), port) : new InetSocketAddress(proxyUrl.getHost(), port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. 
*/ private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ String[] nonProxyHosts = noProxyString.split(","); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'NO_PROXY' doesn't have a strongly standardized format, for now we are going to support values beginning * and ending with '*' or '.' to exclude an entire domain and will quote the value between the prefix and * suffix. In the future this may need to be updated to support more complex scenarios required by * 'NO_PROXY' users such as wild cards within the domain exclusion. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; /* * First check if the non-proxy host begins with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.startsWith(".*")) { prefixWildcard = ".*"; body = body.substring(2); } else if (body.startsWith("*") || body.startsWith(".")) { prefixWildcard = ".*"; body = body.substring(1); } /* * First check if the non-proxy host ends with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.endsWith(".*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 2); } else if (body.endsWith("*") || body.endsWith(".")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. 
*/ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, boolean createUnresolved, String type) { String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(host, port) : new InetSocketAddress(host, port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.setNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." + JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ String[] nonProxyHosts = nonProxyHostsString.split("\\|"); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'http.nonProxyHosts' values are allowed to begin and end with '*' but this is an invalid value for a * pattern, so we need to qualify the quantifier with the match all '.' character. 
*/ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether system proxy configurations (HTTPS_PROXY, HTTP_PROXY) are allowed to be used. */ private static final String JAVA_SYSTEM_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. * * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. 
* * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded the proxy address is DNS resolved. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * * <p> * {@code null} will be returned if no proxy was found in the environment. 
* * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { return fromConfiguration(configuration, false); } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded, the proxy address is DNS resolved based on {@code createUnresolved}. When {@code * createUnresolved} is true resolving {@link * calls. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @param createUnresolved Flag determining whether the returned {@link ProxyOptions} is unresolved. * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. 
* @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration, boolean createUnresolved) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; return attemptToLoadProxy(proxyConfiguration, createUnresolved); } private static ProxyOptions attemptToLoadSystemProxy(Configuration configuration, boolean createUnresolved, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort(); InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(proxyUrl.getHost(), port) : new InetSocketAddress(proxyUrl.getHost(), port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. 
*/ private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ String[] nonProxyHosts = noProxyString.split(","); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'NO_PROXY' doesn't have a strongly standardized format, for now we are going to support values beginning * and ending with '*' or '.' to exclude an entire domain and will quote the value between the prefix and * suffix. In the future this may need to be updated to support more complex scenarios required by * 'NO_PROXY' users such as wild cards within the domain exclusion. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; /* * First check if the non-proxy host begins with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.startsWith(".*")) { prefixWildcard = ".*"; body = body.substring(2); } else if (body.startsWith("*") || body.startsWith(".")) { prefixWildcard = ".*"; body = body.substring(1); } /* * First check if the non-proxy host ends with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.endsWith(".*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 2); } else if (body.endsWith("*") || body.endsWith(".")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. 
*/ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, boolean createUnresolved, String type) { String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(host, port) : new InetSocketAddress(host, port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.setNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." + JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ String[] nonProxyHosts = nonProxyHostsString.split("\\|"); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'http.nonProxyHosts' values are allowed to begin and end with '*' but this is an invalid value for a * pattern, so we need to qualify the quantifier with the match all '.' character. 
*/ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
Ned javadoc
private Attributes convertToOtelAttributes(Map<String, Object> attributes) { AttributesBuilder attributesBuilder = Attributes.builder(); attributes.forEach((key, value) -> { if (value instanceof Boolean) { attributesBuilder.put(key, (boolean) value); } else if (value instanceof String) { attributesBuilder.put(key, String.valueOf(value)); } else if (value instanceof Double) { attributesBuilder.put(key, (Double) value); } else if (value instanceof Long) { attributesBuilder.put(key, (Long) value); } else if (value instanceof String[]) { attributesBuilder.put(key, (String[]) value); } else if (value instanceof long[]) { attributesBuilder.put(key, (long[]) value); } else if (value instanceof double[]) { attributesBuilder.put(key, (double[]) value); } else if (value instanceof boolean[]) { attributesBuilder.put(key, (boolean[]) value); } }); return attributesBuilder.build(); }
} else if (value instanceof boolean[]) {
private Attributes convertToOtelAttributes(Map<String, Object> attributes) { AttributesBuilder attributesBuilder = Attributes.builder(); attributes.forEach((key, value) -> { if (value instanceof Boolean) { attributesBuilder.put(key, (boolean) value); } else if (value instanceof String) { attributesBuilder.put(key, String.valueOf(value)); } else if (value instanceof Double) { attributesBuilder.put(key, (Double) value); } else if (value instanceof Long) { attributesBuilder.put(key, (Long) value); } else if (value instanceof String[]) { attributesBuilder.put(key, (String[]) value); } else if (value instanceof long[]) { attributesBuilder.put(key, (long[]) value); } else if (value instanceof double[]) { attributesBuilder.put(key, (double[]) value); } else if (value instanceof boolean[]) { attributesBuilder.put(key, (boolean[]) value); } else { logger.warning("Could not populate attribute with key '{}', type is not supported."); } }); return attributesBuilder.build(); }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private final Tracer tracer = GlobalOpenTelemetry.getTracer("Azure-OpenTelemetry"); static final String AZ_NAMESPACE_KEY = "az.namespace"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); SpanBuilder spanBuilder = createSpanBuilder( spanName, null, SpanKind.INTERNAL, null, context); return startSpanInternal(spanBuilder, null, false, context); } /** * {@inheritDoc} */ @Override public Context start(String spanName, StartSpanOptions options, Context context) { Objects.requireNonNull(options, "'options' cannot be null."); SpanBuilder spanBuilder = createSpanBuilder( spanName, null, convertToOtelKind(options.getSpanKind()), options.getAttributes(), context); return startSpanInternal(spanBuilder, null, options.getMakeCurrent(), context); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, SpanBuilder.class); if (spanBuilder == null) { return context; } return startSpanInternal(spanBuilder, this::addMessagingAttributes, false, context); case MESSAGE: spanBuilder = createSpanBuilder(spanName, null, SpanKind.PRODUCER, null, context); context = startSpanInternal(spanBuilder, this::addMessagingAttributes, false, context); return setDiagnosticId(context); case PROCESS: SpanContext remoteParentContext = getOrDefault(context, 
SPAN_CONTEXT_KEY, null, SpanContext.class); spanBuilder = createSpanBuilder(spanName, remoteParentContext, SpanKind.CONSUMER, null, context); return startSpanInternal(spanBuilder, this::addMessagingAttributes, true, context); default: return context; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.isRecording()) { span = HttpTraceUtil.setSpanStatus(span, responseCode, throwable); } span.end(); endScope(context); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { Objects.requireNonNull(context, "'context' cannot be null"); if (CoreUtils.isNullOrEmpty(value)) { logger.verbose("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.setAttribute(key, value); } else { logger.verbose("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.verbose("Failed to find span to end it."); return; } if (span.isRecording()) { span = AmqpTraceUtil.parseStatusMessage(span, statusMessage, throwable); } span.end(); endScope(context); } @Override public void addLink(Context context) { final SpanBuilder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, SpanBuilder.class); if (spanBuilder == null) { logger.verbose("Failed to find spanBuilder to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, 
null, SpanContext.class); if (spanContext == null) { logger.verbose("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override public Context getSharedSpanBuilder(String spanName, Context context) { return context.addData(SPAN_BUILDER_KEY, createSpanBuilder(spanName, null, SpanKind.CLIENT, null, context)); } /** * {@inheritDoc} */ @Override @SuppressWarnings("deprecation") public void addEvent(String eventName, Map<String, Object> traceEventAttributes, OffsetDateTime timestamp) { addEvent(eventName, traceEventAttributes, timestamp, new Context(PARENT_SPAN_KEY, Span.current())); } /** * {@inheritDoc} */ @Override public void addEvent(String eventName, Map<String, Object> traceEventAttributes, OffsetDateTime timestamp, Context context) { Objects.requireNonNull(eventName, "'eventName' cannot be null."); Span currentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (currentSpan == null) { logger.verbose("Failed to find a starting span to associate the {} with.", eventName); return; } if (timestamp == null) { currentSpan.addEvent( eventName, traceEventAttributes == null ? Attributes.empty() : convertToOtelAttributes(traceEventAttributes)); } else { currentSpan.addEvent( eventName, traceEventAttributes == null ? 
Attributes.empty() : convertToOtelAttributes(traceEventAttributes), timestamp.toInstant() ); } } private Context startSpanInternal(SpanBuilder spanBuilder, java.util.function.BiConsumer<Span, Context> setAttributes, boolean makeCurrent, Context context) { Objects.requireNonNull(spanBuilder, "'spanBuilder' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Span span = spanBuilder.startSpan(); if (span.isRecording()) { String tracingNamespace = getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, null, String.class); if (tracingNamespace != null) { span.setAttribute(AZ_NAMESPACE_KEY, tracingNamespace); } if (setAttributes != null) { setAttributes.accept(span, context); } } context = context.addData(PARENT_SPAN_KEY, span); if (makeCurrent) { return context.addData(SCOPE_KEY, span.makeCurrent()); } return context; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent * being the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.SpanBuilder} to create and start a new {@code Span}. 
*/ @SuppressWarnings("unchecked") private SpanBuilder createSpanBuilder(String spanName, SpanContext remoteParentContext, SpanKind spanKind, Map<String, Object> beforeSaplingAttributes, Context context) { String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } SpanBuilder spanBuilder = tracer.spanBuilder(spanNameKey) .setSpanKind(spanKind); if (remoteParentContext != null) { spanBuilder.setParent(io.opentelemetry.context.Context.root().with(Span.wrap(remoteParentContext))); } else { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (parentSpan == null) { parentSpan = Span.current(); } spanBuilder.setParent(io.opentelemetry.context.Context.current().with(parentSpan)); } if (beforeSaplingAttributes != null && !beforeSaplingAttributes.isEmpty()) { Attributes otelAttributes = convertToOtelAttributes(beforeSaplingAttributes); otelAttributes.forEach( (key, value) -> spanBuilder.setAttribute((AttributeKey<Object>) key, value)); } return spanBuilder; } private void endScope(Context context) { Scope scope = getOrDefault(context, SCOPE_KEY, null, Scope.class); if (scope != null) { scope.close(); } } /* * Converts our SpanKind to OpenTelemetry SpanKind. */ private SpanKind convertToOtelKind(StartSpanOptions.Kind kind) { return kind == StartSpanOptions.Kind.CLIENT ? SpanKind.CLIENT : SpanKind.INTERNAL; } /** * Maps span/event properties to OpenTelemetry attributes. * * @param attributes the attributes provided by the client SDK's. * @return the OpenTelemetry typed {@Link Attributes}. */ /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param context The context with current tracing span describing unique message context. * @return The {@link Context} containing the {@link SpanContext} and trace-parent of the current span. 
*/ private Context setDiagnosticId(Context context) { Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return context; } SpanContext spanContext = span.getSpanContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); if (traceparent == null) { return context; } return context.addData(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. */ private void addMessagingAttributes(Span span, Context context) { Objects.requireNonNull(span, "'span' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); String entityPath = getOrDefault(context, ENTITY_PATH_KEY, null, String.class); if (entityPath != null) { span.setAttribute(MESSAGE_BUS_DESTINATION, entityPath); } String hostName = getOrDefault(context, HOST_NAME_KEY, null, String.class); if (hostName != null) { span.setAttribute(PEER_ENDPOINT, hostName); } Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class); if (messageEnqueuedTime != null) { span.setAttribute(MESSAGE_ENQUEUED_TIME, messageEnqueuedTime); } } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. 
* @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.verbose("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private final Tracer tracer = GlobalOpenTelemetry.getTracer("Azure-OpenTelemetry"); static final String AZ_NAMESPACE_KEY = "az.namespace"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); private static final AutoCloseable NOOP_CLOSEABLE = new AutoCloseable() { @Override public void close() { } }; /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); SpanBuilder spanBuilder = createSpanBuilder( spanName, null, SpanKind.INTERNAL, null, context); return startSpanInternal(spanBuilder, null, context); } /** * {@inheritDoc} */ @Override public Context start(String spanName, StartSpanOptions options, Context context) { Objects.requireNonNull(options, "'options' cannot be null."); SpanBuilder spanBuilder = createSpanBuilder( spanName, null, convertToOtelKind(options.getSpanKind()), options.getAttributes(), context); return startSpanInternal(spanBuilder, null, context); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, SpanBuilder.class); if (spanBuilder == null) { return context; } return startSpanInternal(spanBuilder, this::addMessagingAttributes, context); case MESSAGE: spanBuilder = createSpanBuilder(spanName, null, SpanKind.PRODUCER, null, context); context = startSpanInternal(spanBuilder, this::addMessagingAttributes, context); return setDiagnosticId(context); case 
PROCESS: SpanContext remoteParentContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); spanBuilder = createSpanBuilder(spanName, remoteParentContext, SpanKind.CONSUMER, null, context); context = startSpanInternal(spanBuilder, this::addMessagingAttributes, context); return context.addData(SCOPE_KEY, makeSpanCurrent(context)); default: return context; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.isRecording()) { span = HttpTraceUtil.setSpanStatus(span, responseCode, throwable); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { Objects.requireNonNull(context, "'context' cannot be null"); if (CoreUtils.isNullOrEmpty(value)) { logger.verbose("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.setAttribute(key, value); } else { logger.verbose("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.verbose("Failed to find span to end it."); return; } if (span.isRecording()) { span = AmqpTraceUtil.parseStatusMessage(span, statusMessage, throwable); } span.end(); endScope(context); } @Override public void addLink(Context context) { final SpanBuilder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, SpanBuilder.class); if (spanBuilder == null) { logger.verbose("Failed to find 
spanBuilder to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.verbose("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * {@inheritDoc} */ @Override public Context getSharedSpanBuilder(String spanName, Context context) { return context.addData(SPAN_BUILDER_KEY, createSpanBuilder(spanName, null, SpanKind.CLIENT, null, context)); } /** * {@inheritDoc} */ @Override public AutoCloseable makeSpanCurrent(Context context) { Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return NOOP_CLOSEABLE; } return span.makeCurrent(); } /** * {@inheritDoc} */ @Override @SuppressWarnings("deprecation") public void addEvent(String eventName, Map<String, Object> traceEventAttributes, OffsetDateTime timestamp) { addEvent(eventName, traceEventAttributes, timestamp, new Context(PARENT_SPAN_KEY, Span.current())); } /** * {@inheritDoc} */ @Override public void addEvent(String eventName, Map<String, Object> traceEventAttributes, OffsetDateTime timestamp, Context context) { Objects.requireNonNull(eventName, "'eventName' cannot be null."); Span currentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (currentSpan == null) { logger.verbose("Failed to find a starting span to associate the {} with.", eventName); return; } if (timestamp == null) { currentSpan.addEvent( eventName, traceEventAttributes == null ? Attributes.empty() : convertToOtelAttributes(traceEventAttributes)); } else { currentSpan.addEvent( eventName, traceEventAttributes == null ? 
Attributes.empty() : convertToOtelAttributes(traceEventAttributes), timestamp.toInstant() ); } } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent * being the designated {@link Span}. * * @param spanBuilder SpanBuilder for the span. Must be created before calling this method * @param setAttributes Callback to populate attributes for the span. * * @return A {@link Context} with created {@link Span}. */ private Context startSpanInternal(SpanBuilder spanBuilder, java.util.function.BiConsumer<Span, Context> setAttributes, Context context) { Objects.requireNonNull(spanBuilder, "'spanBuilder' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Span span = spanBuilder.startSpan(); if (span.isRecording()) { String tracingNamespace = getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, null, String.class); if (tracingNamespace != null) { span.setAttribute(AZ_NAMESPACE_KEY, tracingNamespace); } if (setAttributes != null) { setAttributes.accept(span, context); } } return context.addData(PARENT_SPAN_KEY, span); } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent * being the designated {@code Span}. * * @param spanName The name of the returned Span. * @param remoteParentContext Remote parent context if any, or {@code null} otherwise. * @param spanKind Kind of the span to create. * @param beforeSaplingAttributes Optional attributes available when span starts and important for sampling. * @param context The context containing the span and the span name. * @return A {@code Span.SpanBuilder} to create and start a new {@code Span}. 
*/ @SuppressWarnings("unchecked") private SpanBuilder createSpanBuilder(String spanName, SpanContext remoteParentContext, SpanKind spanKind, Map<String, Object> beforeSaplingAttributes, Context context) { String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } SpanBuilder spanBuilder = tracer.spanBuilder(spanNameKey) .setSpanKind(spanKind); if (remoteParentContext != null) { spanBuilder.setParent(io.opentelemetry.context.Context.root().with(Span.wrap(remoteParentContext))); } else { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (parentSpan == null) { parentSpan = Span.current(); } spanBuilder.setParent(io.opentelemetry.context.Context.current().with(parentSpan)); } if (!CoreUtils.isNullOrEmpty(beforeSaplingAttributes)) { Attributes otelAttributes = convertToOtelAttributes(beforeSaplingAttributes); otelAttributes.forEach( (key, value) -> spanBuilder.setAttribute((AttributeKey<Object>) key, value)); } return spanBuilder; } /** * Ends current scope on the context. * @param context Context instance with the scope to end. */ private void endScope(Context context) { Scope scope = getOrDefault(context, SCOPE_KEY, null, Scope.class); if (scope != null) { scope.close(); } } /* * Converts our SpanKind to OpenTelemetry SpanKind. */ private SpanKind convertToOtelKind(StartSpanOptions.Kind kind) { return kind == StartSpanOptions.Kind.CLIENT ? SpanKind.CLIENT : SpanKind.INTERNAL; } /** * Maps span/event properties to OpenTelemetry attributes. * * @param attributes the attributes provided by the client SDK's. * @return the OpenTelemetry typed {@Link Attributes}. */ /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param context The context with current tracing span describing unique message context. 
* @return The {@link Context} containing the {@link SpanContext} and trace-parent of the current span. */ private Context setDiagnosticId(Context context) { Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return context; } SpanContext spanContext = span.getSpanContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); if (traceparent == null) { return context; } return context.addData(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. */ private void addMessagingAttributes(Span span, Context context) { Objects.requireNonNull(span, "'span' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); String entityPath = getOrDefault(context, ENTITY_PATH_KEY, null, String.class); if (entityPath != null) { span.setAttribute(MESSAGE_BUS_DESTINATION, entityPath); } String hostName = getOrDefault(context, HOST_NAME_KEY, null, String.class); if (hostName != null) { span.setAttribute(PEER_ENDPOINT, hostName); } Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class); if (messageEnqueuedTime != null) { span.setAttribute(MESSAGE_ENQUEUED_TIME, messageEnqueuedTime); } } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. 
* @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.verbose("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
Can remove this once the PR is ready for merging
private TestEnvironment() { this.testMode = readTestModeFromEnvironment(); this.serviceVersion = readServiceVersionFromEnvironment(); this.httpClientType = readHttpClientTypeFromEnvironment(); System.out.println(String.format("Tests will run with %s http client", this.httpClientType)); this.resourceGroupName = Configuration.getGlobalConfiguration().get("STORAGE_RESOURCE_GROUP_NAME"); this.subscriptionId = Configuration.getGlobalConfiguration().get("STORAGE_SUBSCRIPTION_ID"); this.primaryAccount = readTestAccountFromEnvironment("PRIMARY_STORAGE_", this.testMode); this.secondaryAccount = readTestAccountFromEnvironment("SECONDARY_STORAGE_", this.testMode); this.managedDiskAccount = readTestAccountFromEnvironment("MANAGED_DISK_STORAGE_", this.testMode); this.premiumAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_", this.testMode); this.versionedAccount = readTestAccountFromEnvironment("VERSIONED_STORAGE_", this.testMode); this.dataLakeAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_", this.testMode); this.premiumFileAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_FILE_", this.testMode); this.softDeleteAccount = readTestAccountFromEnvironment("SOFT_DELETE_STORAGE_", this.testMode); this.dataLakeSoftDeleteAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_SOFT_DELETE_", this.testMode); }
System.out.println(String.format("Tests will run with %s http client", this.httpClientType));
private TestEnvironment() { this.testMode = readTestModeFromEnvironment(); this.serviceVersion = readServiceVersionFromEnvironment(); this.httpClientType = readHttpClientTypeFromEnvironment(); System.out.println(String.format("Tests will run with %s http client", this.httpClientType)); this.resourceGroupName = Configuration.getGlobalConfiguration().get("STORAGE_RESOURCE_GROUP_NAME"); this.subscriptionId = Configuration.getGlobalConfiguration().get("STORAGE_SUBSCRIPTION_ID"); this.primaryAccount = readTestAccountFromEnvironment("PRIMARY_STORAGE_", this.testMode); this.secondaryAccount = readTestAccountFromEnvironment("SECONDARY_STORAGE_", this.testMode); this.managedDiskAccount = readTestAccountFromEnvironment("MANAGED_DISK_STORAGE_", this.testMode); this.premiumAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_", this.testMode); this.versionedAccount = readTestAccountFromEnvironment("VERSIONED_STORAGE_", this.testMode); this.dataLakeAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_", this.testMode); this.premiumFileAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_FILE_", this.testMode); this.softDeleteAccount = readTestAccountFromEnvironment("SOFT_DELETE_STORAGE_", this.testMode); this.dataLakeSoftDeleteAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_SOFT_DELETE_", this.testMode); }
class TestEnvironment { private static final ClientLogger LOGGER = new ClientLogger(TestEnvironment.class); private static final TestEnvironment INSTANCE = new TestEnvironment(); private final TestHttpClientType httpClientType; private final TestMode testMode; private final String serviceVersion; private final String resourceGroupName; private final String subscriptionId; private final TestAccount primaryAccount; private final TestAccount secondaryAccount; private final TestAccount managedDiskAccount; private final TestAccount premiumAccount; private final TestAccount versionedAccount; private final TestAccount dataLakeAccount; private final TestAccount premiumFileAccount; private final TestAccount softDeleteAccount; private final TestAccount dataLakeSoftDeleteAccount; public static TestEnvironment getInstance() { return INSTANCE; } private static TestMode readTestModeFromEnvironment() { String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE"); TestMode testMode; if (azureTestMode != null) { try { testMode = TestMode.valueOf(azureTestMode.toUpperCase(Locale.US)); } catch (IllegalArgumentException ignored) { LOGGER.error("Could not parse '{}' into TestMode. Using 'Playback' mode.", azureTestMode); testMode = TestMode.PLAYBACK; } } else { LOGGER.info("Environment variable '{}' has not been set yet. 
Using 'Playback' mode.", "AZURE_TEST_MODE"); testMode = TestMode.PLAYBACK; } System.out.println(String.format("--------%s---------", testMode)); return testMode; } private String readServiceVersionFromEnvironment() { String serviceVersion = Configuration.getGlobalConfiguration().get("AZURE_LIVE_TEST_SERVICE_VERSION"); if (serviceVersion == null || serviceVersion.trim().isEmpty()) { System.out.println("Tests will run with default service version"); return null; } else { System.out.println(String.format("Tests will run with %s service version", serviceVersion)); return serviceVersion; } } private static TestAccount readTestAccountFromEnvironment(String prefix, TestMode testMode) { String name = "azstoragesdkaccount"; String key = "astorageaccountkey"; String connectionString = "DefaultEndpointsProtocol=https;AccountName=teststorage;" + "AccountKey=atestaccountkey;EndpointSuffix=core.windows.net"; if (testMode != TestMode.PLAYBACK) { name = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_NAME"); key = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_KEY"); connectionString = Configuration.getGlobalConfiguration().get(prefix + "CONNECTION_STRING"); if (connectionString == null || connectionString.trim().isEmpty()) { connectionString = String.format("DefaultEndpointsProtocol=https;AccountName=%s;" + "AccountKey=%s;EndpointSuffix=core.windows.net", name, key); } } String blobEndpoint = String.format("https: String blobEndpointSecondary = String.format("https: String dataLakeEndpoint = String.format("https: String queueEndpoint = String.format("https: String fileEndpoint = String.format("https: return new TestAccount(name, key, connectionString, blobEndpoint, blobEndpointSecondary, dataLakeEndpoint, queueEndpoint, fileEndpoint); } private static TestHttpClientType readHttpClientTypeFromEnvironment() { String httpClients = Configuration.getGlobalConfiguration().get("AZURE_TEST_HTTP_CLIENTS", "netty"); switch (httpClients.toLowerCase()) { case 
"netty": return TestHttpClientType.NETTY; case "okhttp": return TestHttpClientType.OK_HTTP; default: throw new IllegalArgumentException("Unknown value of AZURE_TEST_HTTP_CLIENTS: " + httpClients); } } public TestMode getTestMode() { return testMode; } public TestAccount getPrimaryAccount() { return primaryAccount; } public TestAccount getSecondaryAccount() { return secondaryAccount; } public TestAccount getPremiumAccount() { return premiumAccount; } public TestAccount getVersionedAccount() { return versionedAccount; } public TestAccount getManagedDiskAccount() { return managedDiskAccount; } public TestAccount getDataLakeAccount() { return dataLakeAccount; } public TestAccount getPremiumFileAccount() { return premiumFileAccount; } public TestAccount getSoftDeleteAccount() { return softDeleteAccount; } public TestAccount getDataLakeSoftDeleteAccount() { return dataLakeSoftDeleteAccount; } public String getServiceVersion() { return serviceVersion; } public String getResourceGroupName() { return resourceGroupName; } public String getSubscriptionId() { return subscriptionId; } public TestHttpClientType getHttpClientType() { return httpClientType; } }
class TestEnvironment { private static final ClientLogger LOGGER = new ClientLogger(TestEnvironment.class); private static final TestEnvironment INSTANCE = new TestEnvironment(); private final TestHttpClientType httpClientType; private final TestMode testMode; private final String serviceVersion; private final String resourceGroupName; private final String subscriptionId; private final TestAccount primaryAccount; private final TestAccount secondaryAccount; private final TestAccount managedDiskAccount; private final TestAccount premiumAccount; private final TestAccount versionedAccount; private final TestAccount dataLakeAccount; private final TestAccount premiumFileAccount; private final TestAccount softDeleteAccount; private final TestAccount dataLakeSoftDeleteAccount; public static TestEnvironment getInstance() { return INSTANCE; } private static TestMode readTestModeFromEnvironment() { String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE"); TestMode testMode; if (azureTestMode != null) { try { testMode = TestMode.valueOf(azureTestMode.toUpperCase(Locale.US)); } catch (IllegalArgumentException ignored) { LOGGER.error("Could not parse '{}' into TestMode. Using 'Playback' mode.", azureTestMode); testMode = TestMode.PLAYBACK; } } else { LOGGER.info("Environment variable '{}' has not been set yet. 
Using 'Playback' mode.", "AZURE_TEST_MODE"); testMode = TestMode.PLAYBACK; } System.out.println(String.format("--------%s---------", testMode)); return testMode; } private String readServiceVersionFromEnvironment() { String serviceVersion = Configuration.getGlobalConfiguration().get("AZURE_LIVE_TEST_SERVICE_VERSION"); if (serviceVersion == null || serviceVersion.trim().isEmpty()) { System.out.println("Tests will run with default service version"); return null; } else { System.out.println(String.format("Tests will run with %s service version", serviceVersion)); return serviceVersion; } } private static TestAccount readTestAccountFromEnvironment(String prefix, TestMode testMode) { String name = "azstoragesdkaccount"; String key = "astorageaccountkey"; String connectionString = "DefaultEndpointsProtocol=https;AccountName=teststorage;" + "AccountKey=atestaccountkey;EndpointSuffix=core.windows.net"; if (testMode != TestMode.PLAYBACK) { name = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_NAME"); key = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_KEY"); connectionString = Configuration.getGlobalConfiguration().get(prefix + "CONNECTION_STRING"); if (connectionString == null || connectionString.trim().isEmpty()) { connectionString = String.format("DefaultEndpointsProtocol=https;AccountName=%s;" + "AccountKey=%s;EndpointSuffix=core.windows.net", name, key); } } String blobEndpoint = String.format("https: String blobEndpointSecondary = String.format("https: String dataLakeEndpoint = String.format("https: String queueEndpoint = String.format("https: String fileEndpoint = String.format("https: return new TestAccount(name, key, connectionString, blobEndpoint, blobEndpointSecondary, dataLakeEndpoint, queueEndpoint, fileEndpoint); } private static TestHttpClientType readHttpClientTypeFromEnvironment() { String httpClients = Configuration.getGlobalConfiguration().get("AZURE_TEST_HTTP_CLIENTS", "netty"); switch (httpClients.toLowerCase()) { case 
"netty": return TestHttpClientType.NETTY; case "okhttp": return TestHttpClientType.OK_HTTP; default: throw new IllegalArgumentException("Unknown value of AZURE_TEST_HTTP_CLIENTS: " + httpClients); } } public TestMode getTestMode() { return testMode; } public TestAccount getPrimaryAccount() { return primaryAccount; } public TestAccount getSecondaryAccount() { return secondaryAccount; } public TestAccount getPremiumAccount() { return premiumAccount; } public TestAccount getVersionedAccount() { return versionedAccount; } public TestAccount getManagedDiskAccount() { return managedDiskAccount; } public TestAccount getDataLakeAccount() { return dataLakeAccount; } public TestAccount getPremiumFileAccount() { return premiumFileAccount; } public TestAccount getSoftDeleteAccount() { return softDeleteAccount; } public TestAccount getDataLakeSoftDeleteAccount() { return dataLakeSoftDeleteAccount; } public String getServiceVersion() { return serviceVersion; } public String getResourceGroupName() { return resourceGroupName; } public String getSubscriptionId() { return subscriptionId; } public TestHttpClientType getHttpClientType() { return httpClientType; } }
I left this on purpose (like other printouts in this class are).
private TestEnvironment() { this.testMode = readTestModeFromEnvironment(); this.serviceVersion = readServiceVersionFromEnvironment(); this.httpClientType = readHttpClientTypeFromEnvironment(); System.out.println(String.format("Tests will run with %s http client", this.httpClientType)); this.resourceGroupName = Configuration.getGlobalConfiguration().get("STORAGE_RESOURCE_GROUP_NAME"); this.subscriptionId = Configuration.getGlobalConfiguration().get("STORAGE_SUBSCRIPTION_ID"); this.primaryAccount = readTestAccountFromEnvironment("PRIMARY_STORAGE_", this.testMode); this.secondaryAccount = readTestAccountFromEnvironment("SECONDARY_STORAGE_", this.testMode); this.managedDiskAccount = readTestAccountFromEnvironment("MANAGED_DISK_STORAGE_", this.testMode); this.premiumAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_", this.testMode); this.versionedAccount = readTestAccountFromEnvironment("VERSIONED_STORAGE_", this.testMode); this.dataLakeAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_", this.testMode); this.premiumFileAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_FILE_", this.testMode); this.softDeleteAccount = readTestAccountFromEnvironment("SOFT_DELETE_STORAGE_", this.testMode); this.dataLakeSoftDeleteAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_SOFT_DELETE_", this.testMode); }
System.out.println(String.format("Tests will run with %s http client", this.httpClientType));
private TestEnvironment() { this.testMode = readTestModeFromEnvironment(); this.serviceVersion = readServiceVersionFromEnvironment(); this.httpClientType = readHttpClientTypeFromEnvironment(); System.out.println(String.format("Tests will run with %s http client", this.httpClientType)); this.resourceGroupName = Configuration.getGlobalConfiguration().get("STORAGE_RESOURCE_GROUP_NAME"); this.subscriptionId = Configuration.getGlobalConfiguration().get("STORAGE_SUBSCRIPTION_ID"); this.primaryAccount = readTestAccountFromEnvironment("PRIMARY_STORAGE_", this.testMode); this.secondaryAccount = readTestAccountFromEnvironment("SECONDARY_STORAGE_", this.testMode); this.managedDiskAccount = readTestAccountFromEnvironment("MANAGED_DISK_STORAGE_", this.testMode); this.premiumAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_", this.testMode); this.versionedAccount = readTestAccountFromEnvironment("VERSIONED_STORAGE_", this.testMode); this.dataLakeAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_", this.testMode); this.premiumFileAccount = readTestAccountFromEnvironment("PREMIUM_STORAGE_FILE_", this.testMode); this.softDeleteAccount = readTestAccountFromEnvironment("SOFT_DELETE_STORAGE_", this.testMode); this.dataLakeSoftDeleteAccount = readTestAccountFromEnvironment("STORAGE_DATA_LAKE_SOFT_DELETE_", this.testMode); }
class TestEnvironment { private static final ClientLogger LOGGER = new ClientLogger(TestEnvironment.class); private static final TestEnvironment INSTANCE = new TestEnvironment(); private final TestHttpClientType httpClientType; private final TestMode testMode; private final String serviceVersion; private final String resourceGroupName; private final String subscriptionId; private final TestAccount primaryAccount; private final TestAccount secondaryAccount; private final TestAccount managedDiskAccount; private final TestAccount premiumAccount; private final TestAccount versionedAccount; private final TestAccount dataLakeAccount; private final TestAccount premiumFileAccount; private final TestAccount softDeleteAccount; private final TestAccount dataLakeSoftDeleteAccount; public static TestEnvironment getInstance() { return INSTANCE; } private static TestMode readTestModeFromEnvironment() { String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE"); TestMode testMode; if (azureTestMode != null) { try { testMode = TestMode.valueOf(azureTestMode.toUpperCase(Locale.US)); } catch (IllegalArgumentException ignored) { LOGGER.error("Could not parse '{}' into TestMode. Using 'Playback' mode.", azureTestMode); testMode = TestMode.PLAYBACK; } } else { LOGGER.info("Environment variable '{}' has not been set yet. 
Using 'Playback' mode.", "AZURE_TEST_MODE"); testMode = TestMode.PLAYBACK; } System.out.println(String.format("--------%s---------", testMode)); return testMode; } private String readServiceVersionFromEnvironment() { String serviceVersion = Configuration.getGlobalConfiguration().get("AZURE_LIVE_TEST_SERVICE_VERSION"); if (serviceVersion == null || serviceVersion.trim().isEmpty()) { System.out.println("Tests will run with default service version"); return null; } else { System.out.println(String.format("Tests will run with %s service version", serviceVersion)); return serviceVersion; } } private static TestAccount readTestAccountFromEnvironment(String prefix, TestMode testMode) { String name = "azstoragesdkaccount"; String key = "astorageaccountkey"; String connectionString = "DefaultEndpointsProtocol=https;AccountName=teststorage;" + "AccountKey=atestaccountkey;EndpointSuffix=core.windows.net"; if (testMode != TestMode.PLAYBACK) { name = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_NAME"); key = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_KEY"); connectionString = Configuration.getGlobalConfiguration().get(prefix + "CONNECTION_STRING"); if (connectionString == null || connectionString.trim().isEmpty()) { connectionString = String.format("DefaultEndpointsProtocol=https;AccountName=%s;" + "AccountKey=%s;EndpointSuffix=core.windows.net", name, key); } } String blobEndpoint = String.format("https: String blobEndpointSecondary = String.format("https: String dataLakeEndpoint = String.format("https: String queueEndpoint = String.format("https: String fileEndpoint = String.format("https: return new TestAccount(name, key, connectionString, blobEndpoint, blobEndpointSecondary, dataLakeEndpoint, queueEndpoint, fileEndpoint); } private static TestHttpClientType readHttpClientTypeFromEnvironment() { String httpClients = Configuration.getGlobalConfiguration().get("AZURE_TEST_HTTP_CLIENTS", "netty"); switch (httpClients.toLowerCase()) { case 
"netty": return TestHttpClientType.NETTY; case "okhttp": return TestHttpClientType.OK_HTTP; default: throw new IllegalArgumentException("Unknown value of AZURE_TEST_HTTP_CLIENTS: " + httpClients); } } public TestMode getTestMode() { return testMode; } public TestAccount getPrimaryAccount() { return primaryAccount; } public TestAccount getSecondaryAccount() { return secondaryAccount; } public TestAccount getPremiumAccount() { return premiumAccount; } public TestAccount getVersionedAccount() { return versionedAccount; } public TestAccount getManagedDiskAccount() { return managedDiskAccount; } public TestAccount getDataLakeAccount() { return dataLakeAccount; } public TestAccount getPremiumFileAccount() { return premiumFileAccount; } public TestAccount getSoftDeleteAccount() { return softDeleteAccount; } public TestAccount getDataLakeSoftDeleteAccount() { return dataLakeSoftDeleteAccount; } public String getServiceVersion() { return serviceVersion; } public String getResourceGroupName() { return resourceGroupName; } public String getSubscriptionId() { return subscriptionId; } public TestHttpClientType getHttpClientType() { return httpClientType; } }
class TestEnvironment { private static final ClientLogger LOGGER = new ClientLogger(TestEnvironment.class); private static final TestEnvironment INSTANCE = new TestEnvironment(); private final TestHttpClientType httpClientType; private final TestMode testMode; private final String serviceVersion; private final String resourceGroupName; private final String subscriptionId; private final TestAccount primaryAccount; private final TestAccount secondaryAccount; private final TestAccount managedDiskAccount; private final TestAccount premiumAccount; private final TestAccount versionedAccount; private final TestAccount dataLakeAccount; private final TestAccount premiumFileAccount; private final TestAccount softDeleteAccount; private final TestAccount dataLakeSoftDeleteAccount; public static TestEnvironment getInstance() { return INSTANCE; } private static TestMode readTestModeFromEnvironment() { String azureTestMode = Configuration.getGlobalConfiguration().get("AZURE_TEST_MODE"); TestMode testMode; if (azureTestMode != null) { try { testMode = TestMode.valueOf(azureTestMode.toUpperCase(Locale.US)); } catch (IllegalArgumentException ignored) { LOGGER.error("Could not parse '{}' into TestMode. Using 'Playback' mode.", azureTestMode); testMode = TestMode.PLAYBACK; } } else { LOGGER.info("Environment variable '{}' has not been set yet. 
Using 'Playback' mode.", "AZURE_TEST_MODE"); testMode = TestMode.PLAYBACK; } System.out.println(String.format("--------%s---------", testMode)); return testMode; } private String readServiceVersionFromEnvironment() { String serviceVersion = Configuration.getGlobalConfiguration().get("AZURE_LIVE_TEST_SERVICE_VERSION"); if (serviceVersion == null || serviceVersion.trim().isEmpty()) { System.out.println("Tests will run with default service version"); return null; } else { System.out.println(String.format("Tests will run with %s service version", serviceVersion)); return serviceVersion; } } private static TestAccount readTestAccountFromEnvironment(String prefix, TestMode testMode) { String name = "azstoragesdkaccount"; String key = "astorageaccountkey"; String connectionString = "DefaultEndpointsProtocol=https;AccountName=teststorage;" + "AccountKey=atestaccountkey;EndpointSuffix=core.windows.net"; if (testMode != TestMode.PLAYBACK) { name = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_NAME"); key = Configuration.getGlobalConfiguration().get(prefix + "ACCOUNT_KEY"); connectionString = Configuration.getGlobalConfiguration().get(prefix + "CONNECTION_STRING"); if (connectionString == null || connectionString.trim().isEmpty()) { connectionString = String.format("DefaultEndpointsProtocol=https;AccountName=%s;" + "AccountKey=%s;EndpointSuffix=core.windows.net", name, key); } } String blobEndpoint = String.format("https: String blobEndpointSecondary = String.format("https: String dataLakeEndpoint = String.format("https: String queueEndpoint = String.format("https: String fileEndpoint = String.format("https: return new TestAccount(name, key, connectionString, blobEndpoint, blobEndpointSecondary, dataLakeEndpoint, queueEndpoint, fileEndpoint); } private static TestHttpClientType readHttpClientTypeFromEnvironment() { String httpClients = Configuration.getGlobalConfiguration().get("AZURE_TEST_HTTP_CLIENTS", "netty"); switch (httpClients.toLowerCase()) { case 
"netty": return TestHttpClientType.NETTY; case "okhttp": return TestHttpClientType.OK_HTTP; default: throw new IllegalArgumentException("Unknown value of AZURE_TEST_HTTP_CLIENTS: " + httpClients); } } public TestMode getTestMode() { return testMode; } public TestAccount getPrimaryAccount() { return primaryAccount; } public TestAccount getSecondaryAccount() { return secondaryAccount; } public TestAccount getPremiumAccount() { return premiumAccount; } public TestAccount getVersionedAccount() { return versionedAccount; } public TestAccount getManagedDiskAccount() { return managedDiskAccount; } public TestAccount getDataLakeAccount() { return dataLakeAccount; } public TestAccount getPremiumFileAccount() { return premiumFileAccount; } public TestAccount getSoftDeleteAccount() { return softDeleteAccount; } public TestAccount getDataLakeSoftDeleteAccount() { return dataLakeSoftDeleteAccount; } public String getServiceVersion() { return serviceVersion; } public String getResourceGroupName() { return resourceGroupName; } public String getSubscriptionId() { return subscriptionId; } public TestHttpClientType getHttpClientType() { return httpClientType; } }